Move fastpath inline function pointers from rte_cryptodev into a
separate structure accessed via a flat array.
The intention is to make rte_cryptodev and related structures private
to avoid future API/ABI breakages.
Signed-off-by: Akhil Goyal <gakhil@marvell.com>
Tested-by: Rebecca Troy <rebecca.troy@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
lib/cryptodev/cryptodev_pmd.c | 53 +++++++++++++++++++++++++++++-
lib/cryptodev/cryptodev_pmd.h | 11 +++++++
lib/cryptodev/rte_cryptodev.c | 19 +++++++++++
lib/cryptodev/rte_cryptodev_core.h | 29 ++++++++++++++++
lib/cryptodev/version.map | 5 +++
5 files changed, 116 insertions(+), 1 deletion(-)
@@ -3,7 +3,7 @@
*/
#include <sys/queue.h>
-
+#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -160,3 +160,54 @@ rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
return 0;
}
+
+static uint16_t
+dummy_crypto_enqueue_burst(__rte_unused void *qp,
+ __rte_unused struct rte_crypto_op **ops,
+ __rte_unused uint16_t nb_ops)
+{
+ CDEV_LOG_ERR(
+ "crypto enqueue burst requested for unconfigured device");
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+static uint16_t
+dummy_crypto_dequeue_burst(__rte_unused void *qp,
+ __rte_unused struct rte_crypto_op **ops,
+ __rte_unused uint16_t nb_ops)
+{
+ CDEV_LOG_ERR(
+ "crypto dequeue burst requested for unconfigured device");
+ rte_errno = ENOTSUP;
+ return 0;
+}
+
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
+{
+ static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
+ static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static const struct rte_crypto_fp_ops dummy = {
+ .enqueue_burst = dummy_crypto_enqueue_burst,
+ .dequeue_burst = dummy_crypto_dequeue_burst,
+ .qp = {
+ .data = dummy_data,
+ .enq_cb = dummy_cb,
+ .deq_cb = dummy_cb,
+ },
+ };
+
+ *fp_ops = dummy;
+}
+
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+ const struct rte_cryptodev *dev)
+{
+ fp_ops->enqueue_burst = dev->enqueue_burst;
+ fp_ops->dequeue_burst = dev->dequeue_burst;
+ fp_ops->qp.data = dev->data->queue_pairs;
+ fp_ops->qp.enq_cb = dev->enq_cbs;
+ fp_ops->qp.deq_cb = dev->deq_cbs;
+}
@@ -516,6 +516,17 @@ RTE_INIT(init_ ##driver_id)\
driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv));\
}
+/* Reset crypto device fastpath APIs to dummy values. */
+__rte_internal
+void
+cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops);
+
+/* Setup crypto device fastpath APIs. */
+__rte_internal
+void
+cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
+ const struct rte_cryptodev *dev);
+
static inline void *
get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
uint8_t driver_id) {
@@ -53,6 +53,9 @@ static struct rte_cryptodev_global cryptodev_globals = {
.nb_devs = 0
};
+/* Public fastpath APIs. */
+struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -917,6 +920,8 @@ rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
dev_id = cryptodev->data->dev_id;
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
/* Close device only if device operations have been set */
if (cryptodev->dev_ops) {
ret = rte_cryptodev_close(dev_id);
@@ -1080,6 +1085,9 @@ rte_cryptodev_start(uint8_t dev_id)
}
diag = (*dev->dev_ops->dev_start)(dev);
+ /* expose selection of PMD fast-path functions */
+ cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);
+
rte_cryptodev_trace_start(dev_id, diag);
if (diag == 0)
dev->data->dev_started = 1;
@@ -1109,6 +1117,9 @@ rte_cryptodev_stop(uint8_t dev_id)
return;
}
+ /* point fast-path functions to dummy ones */
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);
+
(*dev->dev_ops->dev_stop)(dev);
rte_cryptodev_trace_stop(dev_id);
dev->data->dev_started = 0;
@@ -2411,3 +2422,11 @@ rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
return nb_drivers++;
}
+
+RTE_INIT(cryptodev_init_fp_ops)
+{
+ uint32_t i;
+
+ for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
+ cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
+}
@@ -25,6 +25,35 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
+/**
+ * @internal
+ * Structure used to hold opaque pointers to internal cryptodev
+ * queue pair data.
+ * The main purpose of exposing these pointers is to allow the compiler
+ * to fetch this data for fast-path cryptodev inline functions in advance.
+ */
+struct rte_cryptodev_qpdata {
+ /** points to array of internal queue pair data pointers. */
+ void **data;
+ /** points to array of enqueue callback data pointers */
+ struct rte_cryptodev_cb_rcu *enq_cb;
+ /** points to array of dequeue callback data pointers */
+ struct rte_cryptodev_cb_rcu *deq_cb;
+};
+
+struct rte_crypto_fp_ops {
+ /** PMD enqueue burst function. */
+ enqueue_pkt_burst_t enqueue_burst;
+ /** PMD dequeue burst function. */
+ dequeue_pkt_burst_t dequeue_burst;
+ /** Internal queue pair data pointers. */
+ struct rte_cryptodev_qpdata qp;
+ /** Reserved for future ops. */
+ uintptr_t reserved[3];
+} __rte_cache_aligned;
+
+extern struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];
+
/**
* @internal
* The data part, with no function pointers, associated with each device.
@@ -45,6 +45,9 @@ DPDK_22 {
rte_cryptodev_sym_session_init;
rte_cryptodevs;
+ # added in 21.11
+ rte_crypto_fp_ops;
+
local: *;
};
@@ -109,6 +112,8 @@ EXPERIMENTAL {
INTERNAL {
global:
+ cryptodev_fp_ops_reset;
+ cryptodev_fp_ops_set;
rte_cryptodev_allocate_driver;
rte_cryptodev_pmd_allocate;
rte_cryptodev_pmd_callback_process;