@@ -14,6 +14,8 @@
#include "aesni_gcm_pmd_private.h"
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(aesni_gcm_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(aesni_gcm_pmd_dequeue_burst);
/* setup session handlers */
static void
@@ -758,6 +760,7 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
return i;
}
+_RTE_CRYPTO_DEQ_DEF(aesni_gcm_pmd_dequeue_burst)
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
@@ -773,6 +776,7 @@ aesni_gcm_pmd_enqueue_burst(void *queue_pair,
return nb_enqueued;
}
+_RTE_CRYPTO_ENQ_DEF(aesni_gcm_pmd_enqueue_burst)
static int aesni_gcm_remove(struct rte_vdev_device *vdev);
@@ -807,8 +811,10 @@ aesni_gcm_create(const char *name,
dev->dev_ops = rte_aesni_gcm_pmd_ops;
/* register rx/tx burst functions for data path */
- dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
- dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(aesni_gcm_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(aesni_gcm_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -30,6 +30,8 @@ static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+_RTE_CRYPTO_ENQ_PROTO(aesni_mb_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(aesni_mb_pmd_dequeue_burst);
/**
* Calculate the authentication pre-computes
*
@@ -1005,6 +1007,7 @@ aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
return nb_enqueued;
}
+_RTE_CRYPTO_ENQ_DEF(aesni_mb_pmd_enqueue_burst)
/** Get multi buffer session */
static inline struct aesni_mb_session *
@@ -1872,6 +1875,8 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return processed_jobs;
}
+_RTE_CRYPTO_DEQ_DEF(aesni_mb_pmd_dequeue_burst)
+
static MB_MGR *
alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
@@ -2097,8 +2102,10 @@ cryptodev_aesni_mb_create(const char *name,
dev->dev_ops = rte_aesni_mb_pmd_ops;
/* register rx/tx burst functions for data path */
- dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
- dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(aesni_mb_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(aesni_mb_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -18,6 +18,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(armv8_crypto_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(armv8_crypto_pmd_dequeue_burst);
+
static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
/**
@@ -731,6 +734,7 @@ armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->stats.enqueue_err_count++;
return retval;
}
+_RTE_CRYPTO_ENQ_DEF(armv8_crypto_pmd_enqueue_burst)
/** Dequeue burst */
static uint16_t
@@ -747,6 +751,7 @@ armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(armv8_crypto_pmd_dequeue_burst)
/** Create ARMv8 crypto device */
static int
@@ -789,8 +794,10 @@ cryptodev_armv8_crypto_create(const char *name,
 	dev->dev_ops = rte_armv8_crypto_pmd_ops;
 	/* register rx/tx burst functions for data path */
-	dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
-	dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+	/* Fix: this function's local device pointer is 'dev', not 'cryptodev'
+	 * (see dev->dev_ops / dev->feature_flags in the surrounding context);
+	 * using 'cryptodev' here would not compile.
+	 */
+	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+		_RTE_CRYPTO_ENQ_FUNC(armv8_crypto_pmd_enqueue_burst));
+	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+		_RTE_CRYPTO_DEQ_FUNC(armv8_crypto_pmd_dequeue_burst));
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -19,6 +19,9 @@
uint8_t cryptodev_bcmfs_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(bcmfs_sym_pmd_enqueue_op_burst);
+_RTE_CRYPTO_DEQ_PROTO(bcmfs_sym_pmd_dequeue_op_burst);
+
static int bcmfs_sym_qp_release(struct rte_cryptodev *dev,
uint16_t queue_pair_id);
@@ -298,6 +301,7 @@ bcmfs_sym_pmd_enqueue_op_burst(void *queue_pair,
return enq;
}
+_RTE_CRYPTO_ENQ_DEF(bcmfs_sym_pmd_enqueue_op_burst)
static void bcmfs_sym_set_request_status(struct rte_crypto_op *op,
struct bcmfs_sym_request *out)
@@ -339,6 +343,7 @@ bcmfs_sym_pmd_dequeue_op_burst(void *queue_pair,
return pkts;
}
+_RTE_CRYPTO_DEQ_DEF(bcmfs_sym_pmd_dequeue_op_burst)
/*
* An rte_driver is needed in the registration of both the
@@ -380,8 +385,10 @@ bcmfs_sym_dev_create(struct bcmfs_device *fsdev)
cryptodev->driver_id = cryptodev_bcmfs_driver_id;
cryptodev->dev_ops = &crypto_bcmfs_ops;
- cryptodev->enqueue_burst = bcmfs_sym_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = bcmfs_sym_pmd_dequeue_op_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(bcmfs_sym_pmd_enqueue_op_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(bcmfs_sym_pmd_dequeue_op_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -36,6 +36,9 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(caam_jr_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(caam_jr_dequeue_burst);
+
/* Lists the states possible for the SEC user space driver. */
enum sec_driver_state_e {
SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
@@ -697,6 +700,7 @@ caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
return num_rx;
}
+_RTE_CRYPTO_DEQ_DEF(caam_jr_dequeue_burst)
/**
* packet looks like:
@@ -1485,6 +1489,7 @@ caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
return num_tx;
}
+_RTE_CRYPTO_ENQ_DEF(caam_jr_enqueue_burst)
/* Release queue pair */
static int
@@ -2333,8 +2338,10 @@ caam_jr_dev_init(const char *name,
dev->dev_ops = &caam_jr_ops;
/* register rx/tx burst functions for data path */
- dev->dequeue_burst = caam_jr_dequeue_burst;
- dev->enqueue_burst = caam_jr_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(caam_jr_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(caam_jr_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -12,6 +12,7 @@
#include <sys/file.h>
#include <unistd.h>
+#include <cryptodev_pmd.h>
#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
@@ -34,6 +34,9 @@ struct ccp_pmd_init_params {
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+_RTE_CRYPTO_ENQ_PROTO(ccp_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(ccp_pmd_dequeue_burst);
+
const char *ccp_pmd_valid_params[] = {
CCP_CRYPTODEV_PARAM_NAME,
CCP_CRYPTODEV_PARAM_SOCKET_ID,
@@ -140,6 +143,7 @@ ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->qp_stats.enqueued_count += enq_cnt;
return enq_cnt;
}
+_RTE_CRYPTO_ENQ_DEF(ccp_pmd_enqueue_burst)
static uint16_t
ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
@@ -176,6 +180,7 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(ccp_pmd_dequeue_burst)
/*
* The set of PCI devices this driver supports
@@ -257,8 +262,10 @@ cryptodev_ccp_create(const char *name,
/* register rx/tx burst functions for data path */
dev->dev_ops = ccp_pmd_ops;
- dev->enqueue_burst = ccp_pmd_enqueue_burst;
- dev->dequeue_burst = ccp_pmd_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(ccp_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(ccp_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
@@ -256,6 +256,7 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count + i;
}
+_RTE_CRYPTO_ENQ_DEF(cn10k_cpt_enqueue_burst)
static inline void
cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
@@ -414,12 +415,15 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return i;
}
+_RTE_CRYPTO_DEQ_DEF(cn10k_cpt_dequeue_burst)
void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
- dev->enqueue_burst = cn10k_cpt_enqueue_burst;
- dev->dequeue_burst = cn10k_cpt_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(cn10k_cpt_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(cn10k_cpt_dequeue_burst));
rte_mb();
}
@@ -10,6 +10,9 @@
extern struct rte_cryptodev_ops cn10k_cpt_ops;
+_RTE_CRYPTO_ENQ_PROTO(cn10k_cpt_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(cn10k_cpt_dequeue_burst);
+
void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
#endif /* _CN10K_CRYPTODEV_OPS_H_ */
@@ -9,6 +9,7 @@
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>
+#include <cryptodev_pmd.h>
#include "cnxk_cryptodev.h"
#include "cnxk_ipsec.h"
@@ -175,6 +175,7 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count;
}
+_RTE_CRYPTO_ENQ_DEF(cn9k_cpt_enqueue_burst)
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
@@ -299,11 +300,15 @@ cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return i;
}
+_RTE_CRYPTO_DEQ_DEF(cn9k_cpt_dequeue_burst)
+
void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
- dev->enqueue_burst = cn9k_cpt_enqueue_burst;
- dev->dequeue_burst = cn9k_cpt_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(cn9k_cpt_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(cn9k_cpt_dequeue_burst));
rte_mb();
}
@@ -9,6 +9,9 @@
extern struct rte_cryptodev_ops cn9k_cpt_ops;
+_RTE_CRYPTO_ENQ_PROTO(cn9k_cpt_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(cn9k_cpt_dequeue_burst);
+
void cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
#endif /* _CN9K_CRYPTODEV_OPS_H_ */
@@ -4,6 +4,7 @@
#include <rte_cryptodev.h>
#include <rte_security.h>
+#include <cryptodev_pmd.h>
#include "roc_api.h"
@@ -6,6 +6,7 @@
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
+#include <cryptodev_pmd.h>
#include "cnxk_cryptodev_capabilities.h"
#include "cnxk_cryptodev_sec.h"
@@ -59,6 +59,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(dpaa2_sec_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(dpaa2_sec_dequeue_burst);
+
#ifdef RTE_LIB_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
@@ -1524,6 +1527,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
dpaa2_qp->tx_vq.err_pkts += nb_ops;
return num_tx;
}
+_RTE_CRYPTO_ENQ_DEF(dpaa2_sec_enqueue_burst)
#ifdef RTE_LIB_SECURITY
static inline struct rte_crypto_op *
@@ -1727,6 +1731,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
return num_rx;
}
+_RTE_CRYPTO_DEQ_DEF(dpaa2_sec_dequeue_burst)
+
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
@@ -3881,8 +3887,10 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
- cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
- cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(dpaa2_sec_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(dpaa2_sec_dequeue_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -47,6 +47,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(dpaa_sec_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(dpaa_sec_dequeue_burst);
+
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
@@ -1916,6 +1919,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
return num_tx;
}
+_RTE_CRYPTO_ENQ_DEF(dpaa_sec_enqueue_burst)
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
@@ -1940,6 +1944,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
return num_rx;
}
+_RTE_CRYPTO_DEQ_DEF(dpaa_sec_dequeue_burst)
/** Release queue pair */
static int
@@ -3365,8 +3370,10 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
- cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
- cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(dpaa_sec_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(dpaa_sec_dequeue_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -19,6 +19,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(kasumi_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(kasumi_pmd_dequeue_burst);
+
/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -508,6 +511,7 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
return enqueued_ops;
}
+_RTE_CRYPTO_ENQ_DEF(kasumi_pmd_enqueue_burst)
static uint16_t
kasumi_pmd_dequeue_burst(void *queue_pair,
@@ -523,6 +527,7 @@ kasumi_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(kasumi_pmd_dequeue_burst)
static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
@@ -545,8 +550,10 @@ cryptodev_kasumi_create(const char *name,
dev->dev_ops = rte_kasumi_pmd_ops;
/* Register RX/TX burst functions for data path. */
- dev->dequeue_burst = kasumi_pmd_dequeue_burst;
- dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(kasumi_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(kasumi_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -39,6 +39,9 @@ int mlx5_crypto_logtype;
uint8_t mlx5_crypto_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(mlx5_crypto_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(mlx5_crypto_dequeue_burst);
+
const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {
{ /* AES XTS */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -523,6 +526,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
rte_wmb();
return nb_ops;
}
+_RTE_CRYPTO_ENQ_DEF(mlx5_crypto_enqueue_burst)
static __rte_noinline void
mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
@@ -576,6 +580,7 @@ mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
return i;
}
+_RTE_CRYPTO_DEQ_DEF(mlx5_crypto_dequeue_burst)
static void
mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
@@ -1041,8 +1046,10 @@ mlx5_crypto_dev_probe(struct rte_device *dev)
DRV_LOG(INFO,
"Crypto device %s was created successfully.", ibv->name);
crypto_dev->dev_ops = &mlx5_crypto_ops;
- crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
- crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(crypto_dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(mlx5_crypto_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(crypto_dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(mlx5_crypto_dequeue_burst));
crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
crypto_dev->driver_id = mlx5_crypto_driver_id;
priv = crypto_dev->data->dev_private;
@@ -22,6 +22,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(mrvl_crypto_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(mrvl_crypto_pmd_dequeue_burst);
+
struct mrvl_pmd_init_params {
struct rte_cryptodev_pmd_init_params common;
uint32_t max_nb_sessions;
@@ -981,6 +984,7 @@ mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->stats.enqueued_count += to_enq_sec + to_enq_crp;
return consumed;
}
+_RTE_CRYPTO_ENQ_DEF(mrvl_crypto_pmd_enqueue_burst)
/**
* Dequeue burst.
@@ -1046,6 +1050,7 @@ mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
qp->stats.dequeued_count += nb_ops;
return nb_ops;
}
+_RTE_CRYPTO_DEQ_DEF(mrvl_crypto_pmd_dequeue_burst)
/**
* Create a new crypto device.
@@ -1077,8 +1082,10 @@ cryptodev_mrvl_crypto_create(const char *name,
 	dev->dev_ops = rte_mrvl_crypto_pmd_ops;
 	/* Register rx/tx burst functions for data path. */
-	dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
-	dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+	/* Fix: this function's local device pointer is 'dev', not 'cryptodev'
+	 * (see dev->dev_ops / dev->feature_flags in the surrounding context);
+	 * using 'cryptodev' here would not compile.
+	 */
+	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+		_RTE_CRYPTO_ENQ_FUNC(mrvl_crypto_pmd_enqueue_burst));
+	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+		_RTE_CRYPTO_DEQ_FUNC(mrvl_crypto_pmd_dequeue_burst));
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -68,6 +68,9 @@ static const struct rte_driver nitrox_rte_sym_drv = {
.alias = nitrox_sym_drv_name
};
+_RTE_CRYPTO_ENQ_PROTO(nitrox_sym_dev_enq_burst);
+_RTE_CRYPTO_DEQ_PROTO(nitrox_sym_dev_deq_burst);
+
static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
uint16_t qp_id);
@@ -677,6 +680,7 @@ nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
return cnt;
}
+_RTE_CRYPTO_ENQ_DEF(nitrox_sym_dev_enq_burst)
static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
@@ -726,6 +730,7 @@ nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
return cnt;
}
+_RTE_CRYPTO_DEQ_DEF(nitrox_sym_dev_deq_burst)
static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
.dev_configure = nitrox_sym_dev_config,
@@ -769,8 +774,10 @@ nitrox_sym_pmd_create(struct nitrox_device *ndev)
ndev->rte_sym_dev.name = cdev->data->name;
cdev->driver_id = nitrox_sym_drv_id;
cdev->dev_ops = &nitrox_cryptodev_ops;
- cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
- cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
+ rte_crypto_set_enq_burst_fn(cdev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(nitrox_sym_dev_enq_burst));
+ rte_crypto_set_deq_burst_fn(cdev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(nitrox_sym_dev_deq_burst));
cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -6,6 +6,7 @@
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_errno.h>
+#include <cryptodev_pmd.h>
#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"
@@ -11,6 +11,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(null_crypto_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(null_crypto_pmd_dequeue_burst);
+
/** verify and set session parameters */
int
null_crypto_set_session_parameters(
@@ -137,6 +140,7 @@ null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->qp_stats.enqueue_err_count++;
return i;
}
+_RTE_CRYPTO_ENQ_DEF(null_crypto_pmd_enqueue_burst)
/** Dequeue burst */
static uint16_t
@@ -153,6 +157,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(null_crypto_pmd_dequeue_burst)
/** Create crypto device */
static int
@@ -172,8 +177,10 @@ cryptodev_null_create(const char *name,
dev->dev_ops = null_crypto_pmd_ops;
/* register rx/tx burst functions for data path */
- dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
- dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(null_crypto_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(null_crypto_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -12,6 +12,7 @@
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
+#include <cryptodev_pmd.h>
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"
@@ -687,12 +687,14 @@ otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}
+_RTE_CRYPTO_ENQ_DEF(otx_cpt_enqueue_asym)
static uint16_t
otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
+_RTE_CRYPTO_ENQ_DEF(otx_cpt_enqueue_sym)
static __rte_always_inline void
submit_request_to_sso(struct ssows *ws, uintptr_t req,
@@ -1019,12 +1021,14 @@ otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}
+_RTE_CRYPTO_DEQ_DEF(otx_cpt_dequeue_asym)
static uint16_t
otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
+_RTE_CRYPTO_DEQ_DEF(otx_cpt_dequeue_sym)
uintptr_t __rte_hot
otx_crypto_adapter_dequeue(uintptr_t get_work1)
@@ -1151,11 +1155,15 @@ otx_cpt_dev_create(struct rte_cryptodev *c_dev)
c_dev->dev_ops = &cptvf_ops;
if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
- c_dev->enqueue_burst = otx_cpt_enqueue_sym;
- c_dev->dequeue_burst = otx_cpt_dequeue_sym;
+ rte_crypto_set_enq_burst_fn(c_dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(otx_cpt_enqueue_sym));
+ rte_crypto_set_deq_burst_fn(c_dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(otx_cpt_dequeue_sym));
} else {
- c_dev->enqueue_burst = otx_cpt_enqueue_asym;
- c_dev->dequeue_burst = otx_cpt_dequeue_asym;
+ rte_crypto_set_enq_burst_fn(c_dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(otx_cpt_enqueue_asym));
+ rte_crypto_set_deq_burst_fn(c_dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(otx_cpt_dequeue_asym));
}
/* Save dev private data */
@@ -14,6 +14,11 @@
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev);
+_RTE_CRYPTO_ENQ_PROTO(otx_cpt_enqueue_sym);
+_RTE_CRYPTO_DEQ_PROTO(otx_cpt_dequeue_sym);
+_RTE_CRYPTO_ENQ_PROTO(otx_cpt_enqueue_asym);
+_RTE_CRYPTO_DEQ_PROTO(otx_cpt_dequeue_asym);
+
__rte_internal
uint16_t __rte_hot
otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op);
@@ -3,6 +3,7 @@
*/
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
+#include <cryptodev_pmd.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
@@ -33,6 +33,9 @@ static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];
/* Forward declarations */
+_RTE_CRYPTO_ENQ_PROTO(otx2_cpt_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(otx2_cpt_dequeue_burst);
+
static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
@@ -826,6 +829,7 @@ otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count;
}
+_RTE_CRYPTO_ENQ_DEF(otx2_cpt_enqueue_burst)
static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
@@ -1096,12 +1100,15 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return nb_completed;
}
+_RTE_CRYPTO_DEQ_DEF(otx2_cpt_dequeue_burst)
void
otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
- dev->enqueue_burst = otx2_cpt_enqueue_burst;
- dev->dequeue_burst = otx2_cpt_dequeue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(otx2_cpt_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(otx2_cpt_dequeue_burst));
rte_mb();
}
@@ -20,6 +20,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(openssl_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(openssl_pmd_dequeue_burst);
+
#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
static HMAC_CTX *HMAC_CTX_new(void)
{
@@ -2159,6 +2162,7 @@ openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->stats.enqueue_err_count++;
return i;
}
+_RTE_CRYPTO_ENQ_DEF(openssl_pmd_enqueue_burst)
/** Dequeue burst */
static uint16_t
@@ -2175,6 +2179,7 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(openssl_pmd_dequeue_burst)
/** Create OPENSSL crypto device */
static int
@@ -2195,8 +2200,10 @@ cryptodev_openssl_create(const char *name,
dev->dev_ops = rte_openssl_pmd_ops;
/* register rx/tx burst functions for data path */
- dev->dequeue_burst = openssl_pmd_dequeue_burst;
- dev->enqueue_burst = openssl_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(openssl_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(openssl_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -13,6 +13,9 @@
uint8_t qat_asym_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(qat_asym_pmd_enqueue_op_burst);
+_RTE_CRYPTO_DEQ_PROTO(qat_asym_pmd_dequeue_op_burst);
+
static const struct rte_cryptodev_capabilities qat_gen1_asym_capabilities[] = {
QAT_BASE_GEN1_ASYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
@@ -214,12 +217,14 @@ uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
{
return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}
+_RTE_CRYPTO_ENQ_DEF(qat_asym_pmd_enqueue_op_burst)
uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
+_RTE_CRYPTO_DEQ_DEF(qat_asym_pmd_dequeue_op_burst)
/* An rte_driver is needed in the registration of both the device and the driver
* with cryptodev.
@@ -292,8 +297,10 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
cryptodev->driver_id = qat_asym_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(qat_asym_pmd_enqueue_op_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(qat_asym_pmd_dequeue_op_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
@@ -20,6 +20,8 @@
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
uint8_t qat_sym_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(qat_sym_pmd_enqueue_op_burst);
+_RTE_CRYPTO_DEQ_PROTO(qat_sym_pmd_dequeue_op_burst);
static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
QAT_BASE_GEN1_SYM_CAPABILITIES,
@@ -319,6 +321,7 @@ qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
{
return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}
+_RTE_CRYPTO_ENQ_DEF(qat_sym_pmd_enqueue_op_burst)
static uint16_t
qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
@@ -326,6 +329,7 @@ qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
{
return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
+_RTE_CRYPTO_DEQ_DEF(qat_sym_pmd_dequeue_op_burst)
/* An rte_driver is needed in the registration of both the device and the driver
* with cryptodev.
@@ -399,8 +403,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
cryptodev->driver_id = qat_sym_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(qat_sym_pmd_enqueue_op_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(qat_sym_pmd_dequeue_op_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
@@ -18,6 +18,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(snow3g_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(snow3g_pmd_dequeue_burst);
+
/** Get xform chain order. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -520,6 +523,7 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
return enqueued_ops;
}
+_RTE_CRYPTO_ENQ_DEF(snow3g_pmd_enqueue_burst)
static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
@@ -535,6 +539,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(snow3g_pmd_dequeue_burst)
static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
@@ -557,8 +562,10 @@ cryptodev_snow3g_create(const char *name,
dev->dev_ops = rte_snow3g_pmd_ops;
/* Register RX/TX burst functions for data path. */
- dev->dequeue_burst = snow3g_pmd_dequeue_burst;
- dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(snow3g_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(snow3g_pmd_dequeue_burst));
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -731,8 +731,10 @@ crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
cryptodev->driver_id = cryptodev_virtio_driver_id;
cryptodev->dev_ops = &virtio_crypto_dev_ops;
- cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(virtio_crypto_pkt_tx_burst));
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(virtio_crypto_pkt_rx_burst));
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -773,8 +775,8 @@ virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
}
cryptodev->dev_ops = NULL;
- cryptodev->enqueue_burst = NULL;
- cryptodev->dequeue_burst = NULL;
+ rte_crypto_set_enq_burst_fn(cryptodev->data->dev_id, NULL);
+ rte_crypto_set_deq_burst_fn(cryptodev->data->dev_id, NULL);
/* release control queue */
virtio_crypto_queue_release(hw->cvq);
@@ -63,4 +63,6 @@ uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
struct rte_crypto_op **tx_pkts,
uint16_t nb_pkts);
+_RTE_CRYPTO_ENQ_PROTO(virtio_crypto_pkt_tx_burst);
+_RTE_CRYPTO_DEQ_PROTO(virtio_crypto_pkt_rx_burst);
#endif /* _VIRTIO_CRYPTODEV_H_ */
@@ -454,6 +454,7 @@ virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
return nb_rx;
}
+_RTE_CRYPTO_DEQ_DEF(virtio_crypto_pkt_rx_burst)
uint16_t
virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
@@ -525,3 +526,4 @@ virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
return nb_tx;
}
+_RTE_CRYPTO_ENQ_DEF(virtio_crypto_pkt_tx_burst)
@@ -16,6 +16,9 @@
static uint8_t cryptodev_driver_id;
+_RTE_CRYPTO_ENQ_PROTO(zuc_pmd_enqueue_burst);
+_RTE_CRYPTO_DEQ_PROTO(zuc_pmd_dequeue_burst);
+
/** Get xform chain order. */
static enum zuc_operation
zuc_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -444,6 +447,7 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
return enqueued_ops;
}
+_RTE_CRYPTO_ENQ_DEF(zuc_pmd_enqueue_burst)
static uint16_t
zuc_pmd_dequeue_burst(void *queue_pair,
@@ -459,6 +463,7 @@ zuc_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
+_RTE_CRYPTO_DEQ_DEF(zuc_pmd_dequeue_burst)
static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
@@ -505,8 +510,10 @@ cryptodev_zuc_create(const char *name,
dev->dev_ops = rte_zuc_pmd_ops;
/* Register RX/TX burst functions for data path. */
- dev->dequeue_burst = zuc_pmd_dequeue_burst;
- dev->enqueue_burst = zuc_pmd_enqueue_burst;
+ rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_ENQ_FUNC(zuc_pmd_enqueue_burst));
+ rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+ _RTE_CRYPTO_DEQ_FUNC(zuc_pmd_dequeue_burst));
internals = dev->data->dev_private;
internals->mb_mgr = mb_mgr;