[7/8] crypto/scheduler: update for new datapath framework

Message ID 20210829125139.2173235-8-gakhil@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series cryptodev: hide internal structures

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Akhil Goyal Aug. 29, 2021, 12:51 p.m. UTC
  PMD is updated to use the new API for all enqueue
and dequeue paths.

Signed-off-by: Akhil Goyal <gakhil@marvell.com>
---
 drivers/crypto/scheduler/scheduler_failover.c | 23 +++++++++++++++----
 .../crypto/scheduler/scheduler_multicore.c    | 22 ++++++++++++++----
 .../scheduler/scheduler_pkt_size_distr.c      | 22 ++++++++++++++----
 .../crypto/scheduler/scheduler_roundrobin.c   | 22 ++++++++++++++----
 4 files changed, 72 insertions(+), 17 deletions(-)
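
All four scheduler modes follow the same conversion. Below is a minimal sketch of the pattern, assuming the _RTE_CRYPTO_ENQ/DEQ_PROTO, _DEF and _FUNC macros and the rte_crypto_set_enq_burst_fn()/rte_crypto_set_deq_burst_fn() setters introduced earlier in this series (cryptodev_pmd.h); the schedule_xx_* and scheduler_xx_start names are placeholders, not functions from the patch:

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>

/* Forward declarations of the generated burst wrappers. */
_RTE_CRYPTO_ENQ_PROTO(schedule_xx_enqueue);
_RTE_CRYPTO_DEQ_PROTO(schedule_xx_dequeue);

/* The mode-specific handlers keep their original static definitions... */
static uint16_t
schedule_xx_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	return nb_ops;	/* real mode-specific scheduling logic goes here */
}
/* ...and each definition is now followed by the wrapper-generating macro. */
_RTE_CRYPTO_ENQ_DEF(schedule_xx_enqueue)

static uint16_t
schedule_xx_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	return nb_ops;
}
_RTE_CRYPTO_DEQ_DEF(schedule_xx_dequeue)

/*
 * At start time the wrappers are registered per device id instead of
 * writing dev->enqueue_burst / dev->dequeue_burst directly, which is
 * what allows the series to hide those members from the public
 * rte_cryptodev layout.
 */
static int
scheduler_xx_start(struct rte_cryptodev *dev)
{
	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_ENQ_FUNC(schedule_xx_enqueue));
	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_DEQ_FUNC(schedule_xx_dequeue));
	return 0;
}

The hunks below apply exactly this pattern to the failover, multicore, packet-size-distribution and round-robin modes.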
  

Comments

Fan Zhang Sept. 13, 2021, 2:21 p.m. UTC | #1
> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Sunday, August 29, 2021 1:52 PM
> To: dev@dpdk.org
> Cc: anoobj@marvell.com; Nicolau, Radu <radu.nicolau@intel.com>; Doherty,
> Declan <declan.doherty@intel.com>; hemant.agrawal@nxp.com;
> matan@nvidia.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> thomas@monjalon.net; Zhang, Roy Fan <roy.fan.zhang@intel.com>;
> asomalap@amd.com; ruifeng.wang@arm.com;
> ajit.khaparde@broadcom.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>;
> adwivedi@marvell.com; michaelsh@marvell.com;
> rnagadheeraj@marvell.com; jianjay.zhou@huawei.com; jerinj@marvell.com;
> Akhil Goyal <gakhil@marvell.com>
> Subject: [PATCH 7/8] crypto/scheduler: update for new datapath framework
> 
> PMD is updated to use the new API for all enqueue
> and dequeue paths.
> 
Tested-by: Rebecca Troy <rebecca.troy@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
  

Patch

diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 88cc8f05f7..0ccebfa6d1 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -3,6 +3,7 @@ 
  */
 
 #include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
 #include <rte_malloc.h>
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -13,6 +14,11 @@ 
 #define NB_FAILOVER_WORKERS	2
 #define WORKER_SWITCH_MASK	(0x01)
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue_ordering);
+
 struct fo_scheduler_qp_ctx {
 	struct scheduler_worker primary_worker;
 	struct scheduler_worker secondary_worker;
@@ -57,7 +63,7 @@  schedule_fo_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return enqueued_ops;
 }
-
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue)
 
 static uint16_t
 schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -74,6 +80,7 @@  schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue_ordering)
 
 static uint16_t
 schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -106,6 +113,7 @@  schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops + nb_deq_ops2;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue)
 
 static uint16_t
 schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -118,6 +126,7 @@  schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -145,11 +154,15 @@  scheduler_start(struct rte_cryptodev *dev)
 	}
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = schedule_fo_enqueue_ordering;
-		dev->dequeue_burst = schedule_fo_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = schedule_fo_enqueue;
-		dev->dequeue_burst = schedule_fo_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index bf97343e52..4c145dae88 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -4,6 +4,7 @@ 
 #include <unistd.h>
 
 #include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
 #include <rte_malloc.h>
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -16,6 +17,11 @@ 
 
 #define CRYPTO_OP_STATUS_BIT_COMPLETE	0x80
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue_ordering);
+
 /** multi-core scheduler context */
 struct mc_scheduler_ctx {
 	uint32_t num_workers;             /**< Number of workers polling */
@@ -62,6 +68,7 @@  schedule_mc_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue)
 
 static uint16_t
 schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -78,6 +85,7 @@  schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue_ordering)
 
 
 static uint16_t
@@ -105,6 +113,7 @@  schedule_mc_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	return processed_ops;
 
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue)
 
 static uint16_t
 schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -130,6 +139,7 @@  schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
 	return nb_ops_to_deq;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -253,11 +263,15 @@  scheduler_start(struct rte_cryptodev *dev)
 					sched_ctx->wc_pool[i]);
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_mc_enqueue_ordering;
-		dev->dequeue_burst = &schedule_mc_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_mc_enqueue;
-		dev->dequeue_burst = &schedule_mc_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index b025ab9736..811f30ca0d 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -3,6 +3,7 @@ 
  */
 
 #include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
 #include <rte_malloc.h>
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -14,6 +15,11 @@ 
 #define SECONDARY_WORKER_IDX			1
 #define NB_PKT_SIZE_WORKERS			2
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue_ordering);
+
 /** pkt size based scheduler context */
 struct psd_scheduler_ctx {
 	uint32_t threshold;
@@ -169,6 +175,7 @@  schedule_dist_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops_pri + processed_ops_sec;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue)
 
 static uint16_t
 schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -185,6 +192,7 @@  schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue_ordering)
 
 static uint16_t
 schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -222,6 +230,7 @@  schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops_pri + nb_deq_ops_sec;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue)
 
 static uint16_t
 schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -234,6 +243,7 @@  schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -281,11 +291,15 @@  scheduler_start(struct rte_cryptodev *dev)
 	}
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_dist_enqueue_ordering;
-		dev->dequeue_burst = &schedule_dist_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_dist_enqueue;
-		dev->dequeue_burst = &schedule_dist_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue));
 	}
 
 	return 0;
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 95e34401ce..139e227cfe 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -3,11 +3,17 @@ 
  */
 
 #include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
 #include <rte_malloc.h>
 
 #include "rte_cryptodev_scheduler_operations.h"
 #include "scheduler_pmd_private.h"
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue_ordering);
+
 struct rr_scheduler_qp_ctx {
 	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
 	uint32_t nb_workers;
@@ -41,6 +47,7 @@  schedule_rr_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue)
 
 static uint16_t
 schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -57,6 +64,7 @@  schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue_ordering)
 
 
 static uint16_t
@@ -96,6 +104,7 @@  schedule_rr_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue)
 
 static uint16_t
 schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -108,6 +117,7 @@  schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -130,11 +140,15 @@  scheduler_start(struct rte_cryptodev *dev)
 	uint16_t i;
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_rr_enqueue_ordering;
-		dev->dequeue_burst = &schedule_rr_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_rr_enqueue;
-		dev->dequeue_burst = &schedule_rr_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
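
For context (not part of the patch): the application-facing fast path is meant to stay the same. Callers keep using the public burst API, and under this series the inline helpers dispatch to whatever handlers the PMD registered in scheduler_start() above. A minimal, hypothetical caller, where dev_id, qp_id and ops stand for an already configured device, queue pair and prepared burst:

#include <rte_cryptodev.h>

static uint16_t
run_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* Enqueue to the scheduler PMD; the mode-specific handler
	 * registered at start time performs the actual dispatch.
	 */
	uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	/* Poll completed operations back from the same queue pair. */
	return rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, enq);
}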