--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -3,6 +3,7 @@
*/
#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
@@ -13,6 +14,11 @@
#define NB_FAILOVER_WORKERS 2
#define WORKER_SWITCH_MASK (0x01)
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue_ordering);
+
struct fo_scheduler_qp_ctx {
struct scheduler_worker primary_worker;
struct scheduler_worker secondary_worker;
@@ -57,7 +63,7 @@ schedule_fo_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return enqueued_ops;
}
-
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue)
static uint16_t
schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -74,6 +80,7 @@ schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
return nb_ops_enqd;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue_ordering)
static uint16_t
schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -106,6 +113,7 @@ schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return nb_deq_ops + nb_deq_ops2;
}
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue)
static uint16_t
schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -118,6 +126,7 @@ schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
return scheduler_order_drain(order_ring, ops, nb_ops);
}
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue_ordering)
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -145,11 +154,15 @@ scheduler_start(struct rte_cryptodev *dev)
}
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = schedule_fo_enqueue_ordering;
- dev->dequeue_burst = schedule_fo_dequeue_ordering;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue_ordering));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue_ordering));
} else {
- dev->enqueue_burst = schedule_fo_enqueue;
- dev->dequeue_burst = schedule_fo_dequeue;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue));
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -4,6 +4,7 @@
#include <unistd.h>
#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
@@ -16,6 +17,11 @@
#define CRYPTO_OP_STATUS_BIT_COMPLETE 0x80
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue_ordering);
+
/** multi-core scheduler context */
struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
@@ -62,6 +68,7 @@ schedule_mc_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return processed_ops;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue)
static uint16_t
schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -78,6 +85,7 @@ schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
return nb_ops_enqd;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue_ordering)
static uint16_t
@@ -105,6 +113,7 @@ schedule_mc_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return processed_ops;
}
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue)
static uint16_t
schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -130,6 +139,7 @@ schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
return nb_ops_to_deq;
}
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue_ordering)
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -253,11 +263,15 @@ scheduler_start(struct rte_cryptodev *dev)
sched_ctx->wc_pool[i]);
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_mc_enqueue_ordering;
- dev->dequeue_burst = &schedule_mc_dequeue_ordering;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue_ordering));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue_ordering));
} else {
- dev->enqueue_burst = &schedule_mc_enqueue;
- dev->dequeue_burst = &schedule_mc_dequeue;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue));
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -3,6 +3,7 @@
*/
#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
@@ -14,6 +15,11 @@
#define SECONDARY_WORKER_IDX 1
#define NB_PKT_SIZE_WORKERS 2
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue_ordering);
+
/** pkt size based scheduler context */
struct psd_scheduler_ctx {
uint32_t threshold;
@@ -169,6 +175,7 @@ schedule_dist_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return processed_ops_pri + processed_ops_sec;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue)
static uint16_t
schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -185,6 +192,7 @@ schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
return nb_ops_enqd;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue_ordering)
static uint16_t
schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -222,6 +230,7 @@ schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return nb_deq_ops_pri + nb_deq_ops_sec;
}
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue)
static uint16_t
schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -234,6 +243,7 @@ schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
return scheduler_order_drain(order_ring, ops, nb_ops);
}
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue_ordering)
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -281,11 +291,15 @@ scheduler_start(struct rte_cryptodev *dev)
}
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_dist_enqueue_ordering;
- dev->dequeue_burst = &schedule_dist_dequeue_ordering;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue_ordering));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue_ordering));
} else {
- dev->enqueue_burst = &schedule_dist_enqueue;
- dev->dequeue_burst = &schedule_dist_dequeue;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue));
}
return 0;
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -3,11 +3,17 @@
*/
#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
#include <rte_malloc.h>
#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue_ordering);
+
struct rr_scheduler_qp_ctx {
struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
uint32_t nb_workers;
@@ -41,6 +47,7 @@ schedule_rr_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return processed_ops;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue)
static uint16_t
schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -57,6 +64,7 @@ schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
return nb_ops_enqd;
}
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue_ordering)
static uint16_t
@@ -96,6 +104,7 @@ schedule_rr_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
return nb_deq_ops;
}
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue)
static uint16_t
schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -108,6 +117,7 @@ schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
return scheduler_order_drain(order_ring, ops, nb_ops);
}
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue_ordering)
static int
worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -130,11 +140,15 @@ scheduler_start(struct rte_cryptodev *dev)
uint16_t i;
if (sched_ctx->reordering_enabled) {
- dev->enqueue_burst = &schedule_rr_enqueue_ordering;
- dev->dequeue_burst = &schedule_rr_dequeue_ordering;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue_ordering));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue_ordering));
} else {
- dev->enqueue_burst = &schedule_rr_enqueue;
- dev->dequeue_burst = &schedule_rr_dequeue;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue));
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
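
For context, a minimal sketch of how the _RTE_CRYPTO_ENQ_PROTO / _RTE_CRYPTO_ENQ_DEF /
_RTE_CRYPTO_ENQ_FUNC wrappers and the rte_crypto_set_enq_burst_fn() /
rte_crypto_set_deq_burst_fn() setters used above are assumed to fit together. The
wrapper symbol naming (rte_crypto_enq_##fn) and the macro bodies below are
illustrative assumptions only; the authoritative definitions live in
cryptodev_pmd.h, and the _RTE_CRYPTO_DEQ_* counterparts are assumed to follow the
same pattern for the dequeue path.

/*
 * Illustrative sketch, not the library's actual definitions. The wrapper
 * symbol name rte_crypto_enq_##fn is an assumption for this example.
 */
#include <rte_cryptodev.h>	/* uint16_t, struct rte_crypto_op */

/* Declare an externally linkable enqueue wrapper for a static burst function. */
#define _RTE_CRYPTO_ENQ_PROTO(fn) \
	uint16_t rte_crypto_enq_##fn(void *qp, struct rte_crypto_op **ops, \
			uint16_t nb_ops)

/* Define the wrapper: it simply forwards to the PMD's static burst function. */
#define _RTE_CRYPTO_ENQ_DEF(fn) \
	uint16_t rte_crypto_enq_##fn(void *qp, struct rte_crypto_op **ops, \
			uint16_t nb_ops) \
	{ \
		return fn(qp, ops, nb_ops); \
	}

/* Expand to the wrapper's name, for passing to rte_crypto_set_enq_burst_fn(). */
#define _RTE_CRYPTO_ENQ_FUNC(fn) rte_crypto_enq_##fn

With wrappers of this shape, scheduler_start() no longer writes dev->enqueue_burst
and dev->dequeue_burst directly; it registers the wrappers against
dev->data->dev_id through the setters, which presumably install them into the
fast-path call table maintained by the cryptodev library.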