[RFC,4/5] crypto/ionic: add datapath and capabilities support

Message ID 20240222184019.15301-5-andrew.boyer@amd.com (mailing list archive)
State Superseded
Delegated to: Akhil Goyal
Headers
Series crypto/ionic: introduce AMD Pensando ionic crypto driver |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Andrew Boyer Feb. 22, 2024, 6:40 p.m. UTC
  This defines the main datapath and reports the device
capabilities to the stack.

Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
---
 drivers/crypto/ionic/ionic_crypto.h      |  62 +++
 drivers/crypto/ionic/ionic_crypto_caps.c |  55 ++
 drivers/crypto/ionic/ionic_crypto_main.c | 417 +++++++++++++++-
 drivers/crypto/ionic/ionic_crypto_ops.c  | 606 +++++++++++++++++++++++
 drivers/crypto/ionic/meson.build         |   2 +
 5 files changed, 1140 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ionic/ionic_crypto_caps.c
 create mode 100644 drivers/crypto/ionic/ionic_crypto_ops.c
  

Patch

diff --git a/drivers/crypto/ionic/ionic_crypto.h b/drivers/crypto/ionic/ionic_crypto.h
index e35a0de5dd..dc16e2eafc 100644
--- a/drivers/crypto/ionic/ionic_crypto.h
+++ b/drivers/crypto/ionic/ionic_crypto.h
@@ -36,6 +36,8 @@  extern int iocpt_logtype;
 
 #define IOCPT_PRINT_CALL() IOCPT_PRINT(DEBUG, " >>")
 
+const struct rte_cryptodev_capabilities *iocpt_get_caps(uint64_t flags);
+
 static inline void iocpt_struct_size_checks(void)
 {
 	RTE_BUILD_BUG_ON(sizeof(struct ionic_doorbell) != 8);
@@ -162,6 +164,50 @@  struct iocpt_admin_q {
 	uint16_t flags;
 };
 
+struct iocpt_crypto_q {
+	/* cacheline0, cacheline1 */
+	IOCPT_COMMON_FIELDS;
+
+	/* cacheline2 */
+	uint64_t last_wdog_cycles;
+	uint16_t flags;
+
+	/* cacheline3 */
+	struct rte_cryptodev_stats stats;
+
+	uint64_t enqueued_wdogs;
+	uint64_t dequeued_wdogs;
+	uint8_t wdog_iv[IOCPT_Q_WDOG_IV_LEN];
+	uint8_t wdog_pld[IOCPT_Q_WDOG_PLD_LEN];
+	uint8_t wdog_tag[IOCPT_Q_WDOG_TAG_LEN];
+};
+
+#define IOCPT_S_F_INITED	BIT(0)
+
+struct iocpt_session_priv {
+	struct iocpt_dev *dev;
+
+	uint32_t index;
+
+	uint16_t iv_offset;
+	uint16_t iv_length;
+	uint16_t digest_length;
+	uint16_t aad_length;
+
+	uint8_t flags;
+	uint8_t op;
+	uint8_t type;
+
+	uint16_t key_len;
+	uint8_t key[IOCPT_SESS_KEY_LEN_MAX_SYMM];
+};
+
+static inline uint32_t
+iocpt_session_size(void)
+{
+	return sizeof(struct iocpt_session_priv);
+}
+
 #define IOCPT_DEV_F_INITED		BIT(0)
 #define IOCPT_DEV_F_UP			BIT(1)
 #define IOCPT_DEV_F_FW_RESET		BIT(2)
@@ -193,6 +239,7 @@  struct iocpt_dev {
 	rte_spinlock_t adminq_service_lock;
 
 	struct iocpt_admin_q *adminq;
+	struct iocpt_crypto_q **cryptoqs;
 
 	struct rte_bitmap  *sess_bm;	/* SET bit indicates index is free */
 
@@ -241,6 +288,9 @@  int iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
 int iocpt_remove(struct rte_device *rte_dev);
 
 void iocpt_configure(struct iocpt_dev *dev);
+int iocpt_assign_ops(struct rte_cryptodev *cdev);
+int iocpt_start(struct iocpt_dev *dev);
+void iocpt_stop(struct iocpt_dev *dev);
 void iocpt_deinit(struct iocpt_dev *dev);
 
 int iocpt_dev_identify(struct iocpt_dev *dev);
@@ -250,6 +300,14 @@  void iocpt_dev_reset(struct iocpt_dev *dev);
 
 int iocpt_adminq_post_wait(struct iocpt_dev *dev, struct iocpt_admin_ctx *ctx);
 
+int iocpt_cryptoq_alloc(struct iocpt_dev *dev, uint32_t socket_id,
+	uint32_t index, uint16_t ndescs);
+void iocpt_cryptoq_free(struct iocpt_crypto_q *cptq);
+
+int iocpt_session_init(struct iocpt_session_priv *priv);
+int iocpt_session_update(struct iocpt_session_priv *priv);
+void iocpt_session_deinit(struct iocpt_session_priv *priv);
+
 struct ionic_doorbell __iomem *iocpt_db_map(struct iocpt_dev *dev,
 	struct iocpt_queue *q);
 
@@ -258,6 +316,10 @@  typedef bool (*iocpt_cq_cb)(struct iocpt_cq *cq, uint16_t cq_desc_index,
 uint32_t iocpt_cq_service(struct iocpt_cq *cq, uint32_t work_to_do,
 	iocpt_cq_cb cb, void *cb_arg);
 
+void iocpt_get_stats(const struct iocpt_dev *dev,
+	struct rte_cryptodev_stats *stats);
+void iocpt_reset_stats(struct iocpt_dev *dev);
+
 static inline uint16_t
 iocpt_q_space_avail(struct iocpt_queue *q)
 {
diff --git a/drivers/crypto/ionic/ionic_crypto_caps.c b/drivers/crypto/ionic/ionic_crypto_caps.c
new file mode 100644
index 0000000000..da5a69be3d
--- /dev/null
+++ b/drivers/crypto/ionic/ionic_crypto_caps.c
@@ -0,0 +1,55 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 Advanced Micro Devices, Inc.
+ */
+
+#include <rte_cryptodev.h>
+
+#include "ionic_crypto.h"
+
+static const struct rte_cryptodev_capabilities iocpt_sym_caps[] = {
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 1024,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities iocpt_asym_caps[] = {
+	/* None */
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/*
+ * Return the capability array matching the requested feature flags:
+ * the (currently empty) asymmetric list when
+ * RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO is set, otherwise the symmetric
+ * (AES-GCM AEAD) list.  Both arrays are terminated by
+ * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST().
+ */
+const struct rte_cryptodev_capabilities *
+iocpt_get_caps(uint64_t flags)
+{
+	if (flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
+		return iocpt_asym_caps;
+	else
+		return iocpt_sym_caps;
+}
diff --git a/drivers/crypto/ionic/ionic_crypto_main.c b/drivers/crypto/ionic/ionic_crypto_main.c
index 8cfed60958..da24b85348 100644
--- a/drivers/crypto/ionic/ionic_crypto_main.c
+++ b/drivers/crypto/ionic/ionic_crypto_main.c
@@ -31,6 +31,15 @@  iocpt_cq_init(struct iocpt_cq *cq, uint16_t num_descs)
 	return 0;
 }
 
+static void
+iocpt_cq_reset(struct iocpt_cq *cq)
+{
+	cq->tail_idx = 0;
+	cq->done_color = 1;
+
+	memset(cq->base, 0, sizeof(struct iocpt_nop_comp) * cq->num_descs);
+}
+
 static void
 iocpt_cq_map(struct iocpt_cq *cq, void *base, rte_iova_t base_pa)
 {
@@ -91,6 +100,13 @@  iocpt_q_init(struct iocpt_queue *q, uint8_t type, uint32_t index,
 	return 0;
 }
 
+static void
+iocpt_q_reset(struct iocpt_queue *q)
+{
+	q->head_idx = 0;
+	q->tail_idx = 0;
+}
+
 static void
 iocpt_q_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
 {
@@ -114,6 +130,178 @@  iocpt_q_free(struct iocpt_queue *q)
 	}
 }
 
+static void
+iocpt_get_abs_stats(const struct iocpt_dev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	uint32_t i;
+
+	memset(stats, 0, sizeof(*stats));
+
+	/* Sum up the per-queue stats counters */
+	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
+		struct rte_cryptodev_stats *q_stats = &dev->cryptoqs[i]->stats;
+
+		stats->enqueued_count    += q_stats->enqueued_count;
+		stats->dequeued_count    += q_stats->dequeued_count;
+		stats->enqueue_err_count += q_stats->enqueue_err_count;
+		stats->dequeue_err_count += q_stats->dequeue_err_count;
+	}
+}
+
+void
+iocpt_get_stats(const struct iocpt_dev *dev, struct rte_cryptodev_stats *stats)
+{
+	/* Retrieve the new absolute stats values */
+	iocpt_get_abs_stats(dev, stats);
+
+	/* Subtract the base stats values to get relative values */
+	stats->enqueued_count    -= dev->stats_base.enqueued_count;
+	stats->dequeued_count    -= dev->stats_base.dequeued_count;
+	stats->enqueue_err_count -= dev->stats_base.enqueue_err_count;
+	stats->dequeue_err_count -= dev->stats_base.dequeue_err_count;
+}
+
+void
+iocpt_reset_stats(struct iocpt_dev *dev)
+{
+	uint32_t i;
+
+	/* Erase the per-queue stats counters */
+	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++)
+		memset(&dev->cryptoqs[i]->stats, 0,
+			sizeof(dev->cryptoqs[i]->stats));
+
+	/* Update the base stats values */
+	iocpt_get_abs_stats(dev, &dev->stats_base);
+}
+
+/*
+ * Program the session key and parameters into the device.
+ * The key is pushed in IOCPT_SESS_KEY_SEG_LEN-byte segments, one
+ * IOCPT_CMD_SESS_CONTROL admin command per segment; the final segment
+ * carries the IOCPT_SCTL_F_END flag.  Returns 0 on success or the
+ * first admin-queue error code.
+ */
+static int
+iocpt_session_write(struct iocpt_session_priv *priv,
+		    enum iocpt_sess_control_oper oper)
+{
+	struct iocpt_dev *dev = priv->dev;
+	struct iocpt_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.sess_control = {
+			.opcode = IOCPT_CMD_SESS_CONTROL,
+			.type = priv->type,
+			.oper = oper,
+			.index = rte_cpu_to_le_32(priv->index),
+			.key_len = rte_cpu_to_le_16(priv->key_len),
+			.key_seg_len = (uint8_t)RTE_MIN(priv->key_len,
+						IOCPT_SESS_KEY_SEG_LEN),
+		},
+	};
+	struct iocpt_sess_control_cmd *cmd = &ctx.cmd.sess_control;
+	uint16_t key_offset;
+	uint8_t key_segs, seg;
+	int err;
+
+	/* Number of segments needed to cover key_len, rounded up */
+	key_segs = ((priv->key_len - 1) >> IOCPT_SESS_KEY_SEG_SHFT) + 1;
+
+	for (seg = 0; seg < key_segs; seg++) {
+		/* Re-arm the context for each command; presumably
+		 * iocpt_adminq_post_wait() clears it -- confirm.
+		 */
+		ctx.pending_work = true;
+
+		key_offset = seg * cmd->key_seg_len;
+		/* NOTE(review): always copies a full segment, even when the
+		 * final segment is shorter than IOCPT_SESS_KEY_SEG_LEN;
+		 * verify priv->key[] is sized so this cannot read past the
+		 * end of the array.
+		 */
+		memcpy(cmd->key, &priv->key[key_offset],
+			IOCPT_SESS_KEY_SEG_LEN);
+		cmd->key_seg_idx = seg;
+
+		/* Mark final segment */
+		if (seg + 1 == key_segs)
+			cmd->flags |= rte_cpu_to_le_16(IOCPT_SCTL_F_END);
+
+		err = iocpt_adminq_post_wait(dev, &ctx);
+		if (err != 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Create the dedicated session used by the queue watchdog ops.
+ * Session index IOCPT_Q_WDOG_SESS_IDX (0) is reserved for it by
+ * clearing its bit in the free-index bitmap.  The key material is
+ * all-zeros since priv is zero-initialized.
+ */
+static int
+iocpt_session_wdog(struct iocpt_dev *dev)
+{
+	struct iocpt_session_priv priv = {
+		.dev = dev,
+		.index = IOCPT_Q_WDOG_SESS_IDX,
+		.type = IOCPT_SESS_AEAD_AES_GCM,
+		.key_len = IOCPT_Q_WDOG_KEY_LEN,
+	};
+
+	/* Reserve session 0 for queue watchdog */
+	rte_bitmap_clear(dev->sess_bm, IOCPT_Q_WDOG_SESS_IDX);
+
+	return iocpt_session_write(&priv, IOCPT_SESS_INIT);
+}
+
+/*
+ * Allocate a free session index from the bitmap and program the
+ * session into the device.  On device failure the index is returned
+ * to the bitmap.  Returns 0, -ENOSPC when no index is free, or the
+ * admin-queue error code.
+ */
+int
+iocpt_session_init(struct iocpt_session_priv *priv)
+{
+	struct iocpt_dev *dev = priv->dev;
+	uint64_t bm_slab = 0;
+	uint32_t bm_pos = 0;
+	int err = 0;
+
+	/* The free-index bitmap is protected by the adminq lock */
+	rte_spinlock_lock(&dev->adminq_lock);
+
+	if (rte_bitmap_scan(dev->sess_bm, &bm_pos, &bm_slab) > 0) {
+		/* A SET bit means the index is free; claim the first one */
+		priv->index = bm_pos + rte_ctz64(bm_slab);
+		rte_bitmap_clear(dev->sess_bm, priv->index);
+	} else
+		err = -ENOSPC;
+
+	rte_spinlock_unlock(&dev->adminq_lock);
+
+	if (err != 0) {
+		IOCPT_PRINT(ERR, "session index space exhausted");
+		return err;
+	}
+
+	err = iocpt_session_write(priv, IOCPT_SESS_INIT);
+	if (err != 0) {
+		/* Return the claimed index to the bitmap on failure */
+		rte_spinlock_lock(&dev->adminq_lock);
+		rte_bitmap_set(dev->sess_bm, priv->index);
+		rte_spinlock_unlock(&dev->adminq_lock);
+		return err;
+	}
+
+	priv->flags |= IOCPT_S_F_INITED;
+
+	return 0;
+}
+
+int
+iocpt_session_update(struct iocpt_session_priv *priv)
+{
+	return iocpt_session_write(priv, IOCPT_SESS_UPDATE_KEY);
+}
+
+/*
+ * Disable the session in the device (best effort -- the admin status
+ * is deliberately ignored) and return its index to the free bitmap.
+ */
+void
+iocpt_session_deinit(struct iocpt_session_priv *priv)
+{
+	struct iocpt_dev *dev = priv->dev;
+	struct iocpt_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.sess_control = {
+			.opcode = IOCPT_CMD_SESS_CONTROL,
+			.type = priv->type,
+			.oper = IOCPT_SESS_DISABLE,
+			.index = rte_cpu_to_le_32(priv->index),
+			.key_len = rte_cpu_to_le_16(priv->key_len),
+		},
+	};
+
+	/* Best-effort disable; errors are intentionally ignored */
+	(void)iocpt_adminq_post_wait(dev, &ctx);
+
+	rte_spinlock_lock(&dev->adminq_lock);
+	rte_bitmap_set(dev->sess_bm, priv->index);
+	rte_spinlock_unlock(&dev->adminq_lock);
+
+	priv->flags &= ~IOCPT_S_F_INITED;
+}
+
 static const struct rte_memzone *
 iocpt_dma_zone_reserve(const char *type_name, uint16_t qid, size_t size,
 			unsigned int align, int socket_id)
@@ -240,12 +428,157 @@  iocpt_commonq_alloc(struct iocpt_dev *dev,
 	return err;
 }
 
+/*
+ * Allocate a crypto queue (descriptor ring, completion ring, SG ring)
+ * on the given NUMA socket and record it in dev->cryptoqs[index].
+ * Returns 0 or the iocpt_commonq_alloc() error code.
+ */
+int
+iocpt_cryptoq_alloc(struct iocpt_dev *dev, uint32_t socket_id, uint32_t index,
+		uint16_t num_descs)
+{
+	struct iocpt_crypto_q *cptq;
+	uint16_t flags = 0;
+	int err;
+
+	/* CryptoQ always supports scatter-gather */
+	flags |= IOCPT_Q_F_SG;
+
+	IOCPT_PRINT(DEBUG, "cptq %u num_descs %u num_segs %u",
+		index, num_descs, 1);
+
+	err = iocpt_commonq_alloc(dev,
+		IOCPT_QTYPE_CRYPTOQ,
+		sizeof(struct iocpt_crypto_q),
+		socket_id,
+		index,
+		"crypto",
+		flags,
+		num_descs,
+		1,
+		sizeof(struct iocpt_crypto_desc),
+		sizeof(struct iocpt_crypto_comp),
+		sizeof(struct iocpt_crypto_sg_desc),
+		(struct iocpt_common_q **)&cptq);
+	if (err != 0)
+		return err;
+
+	cptq->flags = flags;
+
+	dev->cryptoqs[index] = cptq;
+
+	return 0;
+}
+
 struct ionic_doorbell *
 iocpt_db_map(struct iocpt_dev *dev, struct iocpt_queue *q)
 {
 	return dev->db_pages + q->hw_type;
 }
 
+/*
+ * Post an IOCPT_CMD_Q_INIT admin command to create and enable the
+ * crypto queue in the device, then map its doorbell register.
+ * Returns 0 or the admin-queue error code.
+ */
+static int
+iocpt_cryptoq_init(struct iocpt_crypto_q *cptq)
+{
+	struct iocpt_queue *q = &cptq->q;
+	struct iocpt_dev *dev = cptq->dev;
+	struct iocpt_cq *cq = &cptq->cq;
+	struct iocpt_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.q_init = {
+			.opcode = IOCPT_CMD_Q_INIT,
+			.type = IOCPT_QTYPE_CRYPTOQ,
+			.ver = dev->qtype_info[IOCPT_QTYPE_CRYPTOQ].version,
+			.index = rte_cpu_to_le_32(q->index),
+			.flags = rte_cpu_to_le_16(IOCPT_QINIT_F_ENA |
+						IOCPT_QINIT_F_SG),
+			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
+			.ring_size = rte_log2_u32(q->num_descs),
+			.ring_base = rte_cpu_to_le_64(q->base_pa),
+			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
+			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
+		},
+	};
+	int err;
+
+	IOCPT_PRINT(DEBUG, "cptq_init.index %d", q->index);
+	IOCPT_PRINT(DEBUG, "cptq_init.ring_base %#jx", q->base_pa);
+	IOCPT_PRINT(DEBUG, "cptq_init.ring_size %d",
+		ctx.cmd.q_init.ring_size);
+	IOCPT_PRINT(DEBUG, "cptq_init.ver %u", ctx.cmd.q_init.ver);
+
+	/* Reset SW producer/consumer state before (re)enabling the queue */
+	iocpt_q_reset(q);
+	iocpt_cq_reset(cq);
+
+	err = iocpt_adminq_post_wait(dev, &ctx);
+	if (err != 0)
+		return err;
+
+	/* The completion returns the HW queue identity for the doorbell */
+	q->hw_type = ctx.comp.q_init.hw_type;
+	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
+	q->db = iocpt_db_map(dev, q);
+
+	IOCPT_PRINT(DEBUG, "cptq->hw_type %d", q->hw_type);
+	IOCPT_PRINT(DEBUG, "cptq->hw_index %d", q->hw_index);
+	IOCPT_PRINT(DEBUG, "cptq->db %p", q->db);
+
+	cptq->flags |= IOCPT_Q_F_INITED;
+
+	return 0;
+}
+
+/*
+ * Disable the crypto queue in the device, retrying while the device
+ * reports -EAGAIN, up to IOCPT_CRYPTOQ_WAIT attempts 100 ms apart.
+ * NOTE(review): assumes IOCPT_CRYPTOQ_WAIT >= 1; if it could be 0,
+ * 'err' below would be read uninitialized -- confirm.
+ */
+static void
+iocpt_cryptoq_deinit(struct iocpt_crypto_q *cptq)
+{
+	struct iocpt_dev *dev = cptq->dev;
+	struct iocpt_admin_ctx ctx = {
+		.pending_work = true,
+		.cmd.q_control = {
+			.opcode = IOCPT_CMD_Q_CONTROL,
+			.type = IOCPT_QTYPE_CRYPTOQ,
+			.index = rte_cpu_to_le_32(cptq->q.index),
+			.oper = IOCPT_Q_DISABLE,
+		},
+	};
+	unsigned long sleep_usec = 100UL * 1000;
+	uint32_t sleep_cnt, sleep_max = IOCPT_CRYPTOQ_WAIT;
+	int err;
+
+	for (sleep_cnt = 0; sleep_cnt < sleep_max; sleep_cnt++) {
+		/* Re-arm the context before each retry */
+		ctx.pending_work = true;
+
+		err = iocpt_adminq_post_wait(dev, &ctx);
+		if (err != -EAGAIN)
+			break;
+
+		rte_delay_us_block(sleep_usec);
+	}
+
+	if (err != 0)
+		IOCPT_PRINT(ERR, "Deinit queue %u returned %d after %u ms",
+			cptq->q.index, err, sleep_cnt * 100);
+	else
+		IOCPT_PRINT(DEBUG, "Deinit queue %u returned %d after %u ms",
+			cptq->q.index, err, sleep_cnt * 100);
+
+	IOCPT_PRINT(DEBUG, "Queue %u watchdog: enq %"PRIu64" deq %"PRIu64,
+		cptq->q.index, cptq->enqueued_wdogs, cptq->dequeued_wdogs);
+
+	cptq->flags &= ~IOCPT_Q_F_INITED;
+}
+
+/*
+ * Release all memory owned by the crypto queue: the descriptor
+ * memzone, the common queue resources, and the queue struct itself.
+ * Safe to call with NULL.
+ */
+void
+iocpt_cryptoq_free(struct iocpt_crypto_q *cptq)
+{
+	if (cptq == NULL)
+		return;
+
+	if (cptq->base_z != NULL) {
+		rte_memzone_free(cptq->base_z);
+		cptq->base_z = NULL;
+		cptq->base = NULL;
+		cptq->base_pa = 0;
+	}
+
+	iocpt_q_free(&cptq->q);
+
+	rte_free(cptq);
+}
+
 static int
 iocpt_adminq_alloc(struct iocpt_dev *dev)
 {
@@ -313,6 +646,14 @@  iocpt_alloc_objs(struct iocpt_dev *dev)
 
 	IOCPT_PRINT(DEBUG, "Crypto: %s", dev->name);
 
+	dev->cryptoqs = rte_calloc_socket("iocpt",
+				dev->max_qps, sizeof(*dev->cryptoqs),
+				RTE_CACHE_LINE_SIZE, dev->socket_id);
+	if (dev->cryptoqs == NULL) {
+		IOCPT_PRINT(ERR, "Cannot allocate tx queues array");
+		return -ENOMEM;
+	}
+
 	rte_spinlock_init(&dev->adminq_lock);
 	rte_spinlock_init(&dev->adminq_service_lock);
 
@@ -320,7 +661,7 @@  iocpt_alloc_objs(struct iocpt_dev *dev)
 	if (err != 0) {
 		IOCPT_PRINT(ERR, "Cannot allocate admin queue");
 		err = -ENOMEM;
-		goto err_out;
+		goto err_free_cryptoqs;
 	}
 
 	dev->info_sz = RTE_ALIGN(sizeof(*dev->info), rte_mem_page_size());
@@ -365,7 +706,9 @@  iocpt_alloc_objs(struct iocpt_dev *dev)
 err_free_adminq:
 	iocpt_adminq_free(dev->adminq);
 	dev->adminq = NULL;
-err_out:
+err_free_cryptoqs:
+	rte_free(dev->cryptoqs);
+	dev->cryptoqs = NULL;
 	return err;
 }
 
@@ -385,9 +728,21 @@  iocpt_init(struct iocpt_dev *dev)
 	if (err != 0)
 		return err;
 
+	/* Write the queue watchdog key */
+	err = iocpt_session_wdog(dev);
+	if (err != 0) {
+		IOCPT_PRINT(ERR, "Cannot setup watchdog session");
+		goto err_out_adminq_deinit;
+	}
+
 	dev->state |= IOCPT_DEV_F_INITED;
 
 	return 0;
+
+err_out_adminq_deinit:
+	iocpt_adminq_deinit(dev);
+
+	return err;
 }
 
 void
@@ -396,6 +751,43 @@  iocpt_configure(struct iocpt_dev *dev)
 	RTE_SET_USED(dev);
 }
 
+/*
+ * Bring up every configured crypto queue and mark the device UP.
+ * Stops at the first queue that fails to init; queues already
+ * initialized remain INITED and are torn down by iocpt_stop().
+ */
+int
+iocpt_start(struct iocpt_dev *dev)
+{
+	uint32_t i;
+	int err;
+
+	IOCPT_PRINT(DEBUG, "Starting %u queues",
+		dev->crypto_dev->data->nb_queue_pairs);
+
+	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
+		err = iocpt_cryptoq_init(dev->cryptoqs[i]);
+		if (err != 0)
+			return err;
+	}
+
+	dev->state |= IOCPT_DEV_F_UP;
+
+	return 0;
+}
+
+/*
+ * Mark the device down and disable every crypto queue that was
+ * successfully initialized.
+ */
+void
+iocpt_stop(struct iocpt_dev *dev)
+{
+	uint32_t i;
+
+	IOCPT_PRINT_CALL();
+
+	dev->state &= ~IOCPT_DEV_F_UP;
+
+	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
+		struct iocpt_crypto_q *cptq = dev->cryptoqs[i];
+
+		if (cptq->flags & IOCPT_Q_F_INITED)
+			(void)iocpt_cryptoq_deinit(cptq);
+	}
+}
+
 void
 iocpt_deinit(struct iocpt_dev *dev)
 {
@@ -412,8 +804,16 @@  iocpt_deinit(struct iocpt_dev *dev)
 static void
 iocpt_free_objs(struct iocpt_dev *dev)
 {
+	void **queue_pairs = dev->crypto_dev->data->queue_pairs;
+	uint32_t i;
+
 	IOCPT_PRINT_CALL();
 
+	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
+		iocpt_cryptoq_free(queue_pairs[i]);
+		queue_pairs[i] = NULL;
+	}
+
 	if (dev->sess_bm != NULL) {
 		rte_bitmap_free(dev->sess_bm);
 		rte_free(dev->sess_bm);
@@ -425,6 +825,11 @@  iocpt_free_objs(struct iocpt_dev *dev)
 		dev->adminq = NULL;
 	}
 
+	if (dev->cryptoqs != NULL) {
+		rte_free(dev->cryptoqs);
+		dev->cryptoqs = NULL;
+	}
+
 	if (dev->info != NULL) {
 		rte_memzone_free(dev->info_z);
 		dev->info_z = NULL;
@@ -542,8 +947,16 @@  iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
 		goto err_free_objs;
 	}
 
+	err = iocpt_assign_ops(cdev);
+	if (err != 0) {
+		IOCPT_PRINT(ERR, "Failed to configure opts");
+		goto err_deinit_dev;
+	}
+
 	return 0;
 
+err_deinit_dev:
+	iocpt_deinit(dev);
 err_free_objs:
 	iocpt_free_objs(dev);
 err_destroy_crypto_dev:
diff --git a/drivers/crypto/ionic/ionic_crypto_ops.c b/drivers/crypto/ionic/ionic_crypto_ops.c
new file mode 100644
index 0000000000..69768c9955
--- /dev/null
+++ b/drivers/crypto/ionic/ionic_crypto_ops.c
@@ -0,0 +1,606 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 Advanced Micro Devices, Inc.
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+
+#include "ionic_crypto.h"
+
+static int
+iocpt_op_config(struct rte_cryptodev *cdev,
+		struct rte_cryptodev_config *config __rte_unused)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	iocpt_configure(dev);
+
+	return 0;
+}
+
+static int
+iocpt_op_start(struct rte_cryptodev *cdev)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	return iocpt_start(dev);
+}
+
+/* Stop the device (rte_cryptodev dev_stop callback). */
+static void
+iocpt_op_stop(struct rte_cryptodev *cdev)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	/* iocpt_stop() returns void: do not 'return' its value
+	 * (ISO C forbids returning a void expression, C11 6.8.6.4)
+	 */
+	iocpt_stop(dev);
+}
+
+static int
+iocpt_op_close(struct rte_cryptodev *cdev)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	iocpt_deinit(dev);
+
+	return 0;
+}
+
+static void
+iocpt_op_info_get(struct rte_cryptodev *cdev, struct rte_cryptodev_info *info)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	if (info == NULL)
+		return;
+
+	info->max_nb_queue_pairs = dev->max_qps;
+	info->feature_flags = dev->features;
+	info->capabilities = iocpt_get_caps(info->feature_flags);
+	/* Reserve one session for watchdog */
+	info->sym.max_nb_sessions = dev->max_sessions - 1;
+	info->driver_id = dev->driver_id;
+	info->min_mbuf_headroom_req = 0;
+	info->min_mbuf_tailroom_req = 0;
+}
+
+static void
+iocpt_op_stats_get(struct rte_cryptodev *cdev,
+		struct rte_cryptodev_stats *stats)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	iocpt_get_stats(dev, stats);
+}
+
+static void
+iocpt_op_stats_reset(struct rte_cryptodev *cdev)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	iocpt_reset_stats(dev);
+}
+
+static int
+iocpt_op_queue_release(struct rte_cryptodev *cdev, uint16_t queue_id)
+{
+	struct iocpt_crypto_q *cptq = cdev->data->queue_pairs[queue_id];
+
+	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);
+
+	assert(!(cptq->flags & IOCPT_Q_F_INITED));
+
+	iocpt_cryptoq_free(cptq);
+
+	cdev->data->queue_pairs[queue_id] = NULL;
+
+	return 0;
+}
+
+static int
+iocpt_op_queue_setup(struct rte_cryptodev *cdev, uint16_t queue_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+	int err;
+
+	if (cdev->data->queue_pairs[queue_id] != NULL)
+		iocpt_op_queue_release(cdev, queue_id);
+
+	if (qp_conf->nb_descriptors < (1 << IOCPT_QSIZE_MIN_LG2) ||
+	    qp_conf->nb_descriptors > (1 << IOCPT_QSIZE_MAX_LG2)) {
+		IOCPT_PRINT(ERR, "invalid nb_descriptors %u, use range %u..%u",
+			qp_conf->nb_descriptors,
+			1 << IOCPT_QSIZE_MIN_LG2, 1 << IOCPT_QSIZE_MAX_LG2);
+		return -ERANGE;
+	}
+
+	IOCPT_PRINT(DEBUG, "queue_id %u", queue_id);
+
+	err = iocpt_cryptoq_alloc(dev, socket_id, queue_id,
+				qp_conf->nb_descriptors);
+	if (err != 0)
+		return err;
+
+	cdev->data->queue_pairs[queue_id] = dev->cryptoqs[queue_id];
+
+	return 0;
+}
+
+static unsigned int
+iocpt_op_get_session_size(struct rte_cryptodev *cdev __rte_unused)
+{
+	return iocpt_session_size();
+}
+
+/* Only a single (non-chained) AEAD transform is supported. */
+static inline int
+iocpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
+{
+	if (xform->next != NULL) {
+		IOCPT_PRINT(ERR, "chaining not supported");
+		return -ENOTSUP;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+		IOCPT_PRINT(ERR, "xform->type %d not supported", xform->type);
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/*
+ * Fill the session private data from an AES-GCM AEAD transform:
+ * opcode, key, IV offset/length, digest length, and AAD length.
+ * Returns 0 on success, -EINVAL for an unknown algorithm, and -1 for
+ * an unknown op direction or bad key length.
+ * NOTE(review): the bare -1 returns could be -EINVAL for consistency.
+ */
+static __rte_always_inline int
+iocpt_fill_sess_aead(struct rte_crypto_sym_xform *xform,
+		struct iocpt_session_priv *priv)
+{
+	struct rte_crypto_aead_xform *aead_form = &xform->aead;
+
+	if (aead_form->algo != RTE_CRYPTO_AEAD_AES_GCM) {
+		IOCPT_PRINT(ERR, "Unknown algo");
+		return -EINVAL;
+	}
+	if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
+	} else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+		priv->op = IOCPT_DESC_OPCODE_GCM_AEAD_DECRYPT;
+	} else {
+		IOCPT_PRINT(ERR, "Unknown cipher operations");
+		return -1;
+	}
+
+	if (aead_form->key.length < IOCPT_SESS_KEY_LEN_MIN ||
+	    aead_form->key.length > IOCPT_SESS_KEY_LEN_MAX_SYMM) {
+		IOCPT_PRINT(ERR, "Invalid cipher keylen %u",
+			aead_form->key.length);
+		return -1;
+	}
+	priv->key_len = aead_form->key.length;
+	memcpy(priv->key, aead_form->key.data, priv->key_len);
+
+	priv->type = IOCPT_SESS_AEAD_AES_GCM;
+	priv->iv_offset = aead_form->iv.offset;
+	priv->iv_length = aead_form->iv.length;
+	priv->digest_length = aead_form->digest_length;
+	priv->aad_length = aead_form->aad_length;
+
+	return 0;
+}
+
+/*
+ * Translate the xform chain into session private data and program
+ * the session into the device.  Only a single AEAD xform is accepted
+ * (enforced up front by iocpt_is_algo_supported()).
+ */
+static int
+iocpt_session_cfg(struct iocpt_dev *dev,
+		  struct rte_crypto_sym_xform *xform,
+		  struct rte_cryptodev_sym_session *sess)
+{
+	struct rte_crypto_sym_xform *chain;
+	struct iocpt_session_priv *priv = NULL;
+
+	if (iocpt_is_algo_supported(xform) < 0)
+		return -ENOTSUP;
+
+	if (unlikely(sess == NULL)) {
+		IOCPT_PRINT(ERR, "invalid session");
+		return -EINVAL;
+	}
+
+	priv = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
+	priv->dev = dev;
+
+	/* Single-element chain in practice; see iocpt_is_algo_supported() */
+	chain = xform;
+	while (chain) {
+		switch (chain->type) {
+		case RTE_CRYPTO_SYM_XFORM_AEAD:
+			if (iocpt_fill_sess_aead(chain, priv))
+				return -EIO;
+			break;
+		default:
+			IOCPT_PRINT(ERR, "invalid crypto xform type %d",
+				chain->type);
+			return -ENOTSUP;
+		}
+		chain = chain->next;
+	}
+
+	return iocpt_session_init(priv);
+}
+
+static int
+iocpt_op_session_cfg(struct rte_cryptodev *cdev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	return iocpt_session_cfg(dev, xform, sess);
+}
+
+static void
+iocpt_session_clear(struct rte_cryptodev_sym_session *sess)
+{
+	iocpt_session_deinit(CRYPTODEV_GET_SYM_SESS_PRIV(sess));
+}
+
+static void
+iocpt_op_session_clear(struct rte_cryptodev *cdev __rte_unused,
+		struct rte_cryptodev_sym_session *sess)
+{
+	iocpt_session_clear(sess);
+}
+
+static inline void
+iocpt_fill_sge(struct iocpt_crypto_sg_elem *arr, uint8_t idx,
+	       uint64_t addr, uint16_t len)
+{
+	arr[idx].addr = rte_cpu_to_le_64(addr);
+	arr[idx].len = rte_cpu_to_le_16(len);
+}
+
+/*
+ * Build one AEAD descriptor for 'op' at the queue head.
+ * Source SGL layout: [IV][AAD (optional)][payload segments...][digest].
+ * For out-of-place ops (m_dst set) a destination SGL is also built.
+ * Returns -ERANGE when the payload needs more SG elements than the
+ * descriptor supports; the queue head is only advanced on success.
+ */
+static __rte_always_inline int
+iocpt_enq_one_aead(struct iocpt_crypto_q *cptq,
+		   struct iocpt_session_priv *priv, struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct iocpt_queue *q = &cptq->q;
+	struct iocpt_crypto_desc *desc, *desc_base = q->base;
+	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
+	struct iocpt_crypto_sg_elem *src, *dst;
+	rte_iova_t aad_addr, digest_addr, iv_addr, seg_addr;
+	uint32_t data_len, data_offset, seg_len;
+	uint8_t nsge_src = 0, nsge_dst = 0, flags = 0;
+	struct rte_mbuf *m;
+
+	desc = &desc_base[q->head_idx];
+	sg_desc = &sg_desc_base[q->head_idx];
+	src = sg_desc->src_elems;
+	dst = sg_desc->dst_elems;
+
+	/* Fill the first SGE with the IV / Nonce */
+	iv_addr = rte_crypto_op_ctophys_offset(op, priv->iv_offset);
+	iocpt_fill_sge(src, nsge_src++, iv_addr, priv->iv_length);
+
+	/* Fill the second SGE with the AAD, if applicable */
+	if (priv->aad_length > 0) {
+		aad_addr = sym_op->aead.aad.phys_addr;
+		iocpt_fill_sge(src, nsge_src++, aad_addr, priv->aad_length);
+		flags |= IOCPT_DESC_F_AAD_VALID;
+	}
+
+	m = sym_op->m_src;
+	data_len = sym_op->aead.data.length;
+
+	/* Fast-forward through mbuf chain to account for data offset */
+	data_offset = sym_op->aead.data.offset;
+	while (m != NULL && data_offset >= m->data_len) {
+		data_offset -= m->data_len;
+		m = m->next;
+	}
+
+	/* Fill the next SGEs with the payload segments */
+	while (m != NULL && data_len > 0) {
+		seg_addr = rte_mbuf_data_iova(m) + data_offset;
+		seg_len = RTE_MIN(m->data_len - data_offset, data_len);
+		/* Only the first segment carries the offset */
+		data_offset = 0;
+		data_len -= seg_len;
+
+		/* Use -1 to save room for digest */
+		if (nsge_src >= IOCPT_CRYPTO_MAX_SG_ELEMS - 1)
+			return -ERANGE;
+
+		iocpt_fill_sge(src, nsge_src++, seg_addr, seg_len);
+
+		m = m->next;
+	}
+
+	/* AEAD AES-GCM: digest == authentication tag */
+	digest_addr = sym_op->aead.digest.phys_addr;
+	iocpt_fill_sge(src, nsge_src++, digest_addr, priv->digest_length);
+
+	/* Process Out-Of-Place destination SGL */
+	if (sym_op->m_dst != NULL) {
+		/* Put the AAD here, too */
+		if (priv->aad_length > 0)
+			iocpt_fill_sge(dst, nsge_dst++,
+				sym_op->aead.aad.phys_addr, priv->aad_length);
+
+		m = sym_op->m_dst;
+		data_len = sym_op->aead.data.length;
+
+		/* Fast-forward through chain to account for data offset */
+		data_offset = sym_op->aead.data.offset;
+		while (m != NULL && data_offset >= m->data_len) {
+			data_offset -= m->data_len;
+			m = m->next;
+		}
+
+		/* Fill in the SGEs with the payload segments */
+		while (m != NULL && data_len > 0) {
+			seg_addr = rte_mbuf_data_iova(m) + data_offset;
+			seg_len = RTE_MIN(m->data_len - data_offset, data_len);
+			data_offset = 0;
+			data_len -= seg_len;
+
+			if (nsge_dst >= IOCPT_CRYPTO_MAX_SG_ELEMS)
+				return -ERANGE;
+
+			iocpt_fill_sge(dst, nsge_dst++, seg_addr, seg_len);
+
+			m = m->next;
+		}
+	}
+
+	/* Commit the descriptor and remember the op for completion time */
+	desc->opcode = priv->op;
+	desc->flags = flags;
+	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, nsge_dst);
+	desc->session_tag = rte_cpu_to_le_32(priv->index);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	q->info[q->head_idx] = op;
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+	return 0;
+}
+
+/*
+ * Burst-enqueue symmetric crypto ops (rte_cryptodev enqueue_burst).
+ * Stops at the first op that cannot be posted; the descriptors built
+ * so far are flushed to the device with a single doorbell ring.
+ * Returns the number of ops actually enqueued.
+ */
+static uint16_t
+iocpt_enqueue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct iocpt_crypto_q *cptq = qp;
+	struct rte_crypto_op *op;
+	struct iocpt_session_priv *priv;
+	struct rte_cryptodev_stats *stats = &cptq->stats;
+	uint16_t avail, count;
+	int err;
+
+	/* Never post more than the ring has room for */
+	avail = iocpt_q_space_avail(&cptq->q);
+	if (unlikely(nb_ops > avail))
+		nb_ops = avail;
+
+	count = 0;
+	while (likely(count < nb_ops)) {
+		op = ops[count];
+
+		if (unlikely(op->sess_type != RTE_CRYPTO_OP_WITH_SESSION)) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+
+		priv = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
+		if (unlikely(priv == NULL)) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+			break;
+		}
+
+		err = iocpt_enq_one_aead(cptq, priv, op);
+		if (unlikely(err != 0)) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			stats->enqueue_err_count++;
+			break;
+		}
+
+		count++;
+	}
+
+	if (likely(count > 0)) {
+		iocpt_q_flush(&cptq->q);
+
+		/* Restart timer if ops are being enqueued */
+		cptq->last_wdog_cycles = rte_get_timer_cycles();
+
+		stats->enqueued_count += count;
+	}
+
+	return count;
+}
+
+/*
+ * Post a self-contained AES-GCM encrypt op on the reserved watchdog
+ * session to nudge a queue that has pending work but no completions.
+ * The op struct is allocated here and freed when it is dequeued in
+ * iocpt_dequeue_sym(); on allocation or space failure the doorbell is
+ * still re-rung via iocpt_q_flush().
+ */
+static void
+iocpt_enqueue_wdog(struct iocpt_crypto_q *cptq)
+{
+	struct iocpt_queue *q = &cptq->q;
+	struct iocpt_crypto_desc *desc, *desc_base = q->base;
+	struct iocpt_crypto_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
+	struct iocpt_crypto_sg_elem *src;
+	struct rte_crypto_op *wdog_op;
+	rte_iova_t iv_addr, pld_addr, tag_addr;
+	uint8_t nsge_src = 0;
+	uint16_t avail;
+
+	avail = iocpt_q_space_avail(&cptq->q);
+	if (avail < 1)
+		goto out_flush;
+
+	wdog_op = rte_zmalloc_socket("iocpt", sizeof(*wdog_op),
+				RTE_CACHE_LINE_SIZE, rte_socket_id());
+	if (wdog_op == NULL)
+		goto out_flush;
+
+	/* Marked with a special type so the dequeue path can detect it */
+	wdog_op->type = IOCPT_Q_WDOG_OP_TYPE;
+	wdog_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	desc = &desc_base[q->head_idx];
+	sg_desc = &sg_desc_base[q->head_idx];
+	src = sg_desc->src_elems;
+
+	/* Fill the first SGE with the IV / Nonce */
+	iv_addr = rte_mem_virt2iova(cptq->wdog_iv);
+	iocpt_fill_sge(src, nsge_src++, iv_addr, IOCPT_Q_WDOG_IV_LEN);
+
+	/* Fill the second SGE with the payload segment */
+	pld_addr = rte_mem_virt2iova(cptq->wdog_pld);
+	iocpt_fill_sge(src, nsge_src++, pld_addr, IOCPT_Q_WDOG_PLD_LEN);
+
+	/* AEAD AES-GCM: digest == authentication tag */
+	tag_addr = rte_mem_virt2iova(cptq->wdog_tag);
+	iocpt_fill_sge(src, nsge_src++, tag_addr, IOCPT_Q_WDOG_TAG_LEN);
+
+	desc->opcode = IOCPT_DESC_OPCODE_GCM_AEAD_ENCRYPT;
+	desc->flags = 0;
+	desc->num_src_dst_sgs = iocpt_encode_nsge_src_dst(nsge_src, 0);
+	desc->session_tag = rte_cpu_to_le_32(IOCPT_Q_WDOG_SESS_IDX);
+
+	q->info[q->head_idx] = wdog_op;
+	q->head_idx = Q_NEXT_TO_POST(q, 1);
+
+	IOCPT_PRINT(DEBUG, "Queue %u wdog enq %p ops %"PRIu64,
+		q->index, wdog_op, cptq->stats.enqueued_count);
+	cptq->enqueued_wdogs++;
+
+out_flush:
+	iocpt_q_flush(q);
+}
+
+/*
+ * Burst-dequeue completed ops (rte_cryptodev dequeue_burst).
+ * Completions can arrive out of order, so the CQ is walked first to
+ * record per-op statuses, then ops are popped from the SQ in posted
+ * order.  Watchdog ops are consumed and freed internally.  When
+ * nothing completes but work is still pending past the deadline, a
+ * watchdog op is enqueued to re-ring the doorbell.
+ */
+static uint16_t
+iocpt_dequeue_sym(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct iocpt_crypto_q *cptq = qp;
+	struct iocpt_queue *q = &cptq->q;
+	struct iocpt_cq *cq = &cptq->cq;
+	struct rte_crypto_op *op;
+	struct iocpt_crypto_comp *cq_desc_base = cq->base;
+	volatile struct iocpt_crypto_comp *cq_desc;
+	struct rte_cryptodev_stats *stats = &cptq->stats;
+	uint64_t then, now, hz, delta;
+	uint16_t count = 0;
+
+	cq_desc = &cq_desc_base[cq->tail_idx];
+
+	/* First walk the CQ to update any completed op's status
+	 * NB: These can arrive out of order!
+	 */
+	while ((cq_desc->color & 0x1) == cq->done_color) {
+		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
+		/* The expected color flips on each ring wrap */
+		if (unlikely(cq->tail_idx == 0))
+			cq->done_color = !cq->done_color;
+
+		op = q->info[rte_le_to_cpu_16(cq_desc->comp_index)];
+
+		/* Process returned CQ descriptor status */
+		if (unlikely(cq_desc->status)) {
+			switch (cq_desc->status) {
+			case IOCPT_COMP_SYMM_AUTH_VERIFY_ERROR:
+				op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+				break;
+			case IOCPT_COMP_INVAL_OPCODE_ERROR:
+			case IOCPT_COMP_UNSUPP_OPCODE_ERROR:
+			case IOCPT_COMP_SYMM_SRC_SG_ERROR:
+			case IOCPT_COMP_SYMM_DST_SG_ERROR:
+			case IOCPT_COMP_SYMM_SRC_DST_LEN_MISMATCH:
+			case IOCPT_COMP_SYMM_KEY_IDX_ERROR:
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				break;
+			default:
+				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+				break;
+			}
+		} else
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+		cq_desc = &cq_desc_base[cq->tail_idx];
+	}
+
+	/* Next walk the SQ to pop off completed ops in-order */
+	while (count < nb_ops) {
+		op = q->info[q->tail_idx];
+
+		/* No more completions */
+		if (op == NULL ||
+		    op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			break;
+
+		/* Handle watchdog operations */
+		if (unlikely(op->type == IOCPT_Q_WDOG_OP_TYPE)) {
+			IOCPT_PRINT(DEBUG, "Queue %u wdog deq %p st %d",
+				q->index, op, op->status);
+			q->info[q->tail_idx] = NULL;
+			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+			cptq->dequeued_wdogs++;
+			/* Allocated in iocpt_enqueue_wdog(); not user-owned */
+			rte_free(op);
+			continue;
+		}
+
+		if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
+			stats->dequeue_err_count++;
+
+		ops[count] = op;
+		q->info[q->tail_idx] = NULL;
+
+		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
+		count++;
+	}
+
+	if (!count) {
+		/*
+		 * Ring the doorbell again if no work was dequeued and work
+		 * is still pending after the deadline.
+		 */
+		if (q->head_idx != q->tail_idx) {
+			then = cptq->last_wdog_cycles;
+			now = rte_get_timer_cycles();
+			hz = rte_get_timer_hz();
+			delta = (now - then) * 1000;
+
+			/* i.e. elapsed ms >= IONIC_Q_WDOG_MS */
+			if (delta >= hz * IONIC_Q_WDOG_MS) {
+				iocpt_enqueue_wdog(cptq);
+				cptq->last_wdog_cycles = now;
+			}
+		}
+	} else
+		/* Restart timer if the queue is making progress */
+		cptq->last_wdog_cycles = rte_get_timer_cycles();
+
+	stats->dequeued_count += count;
+
+	return count;
+}
+
+static struct rte_cryptodev_ops iocpt_ops = {
+	.dev_configure = iocpt_op_config,
+	.dev_start = iocpt_op_start,
+	.dev_stop = iocpt_op_stop,
+	.dev_close = iocpt_op_close,
+	.dev_infos_get = iocpt_op_info_get,
+
+	.stats_get = iocpt_op_stats_get,
+	.stats_reset = iocpt_op_stats_reset,
+	.queue_pair_setup = iocpt_op_queue_setup,
+	.queue_pair_release = iocpt_op_queue_release,
+
+	.sym_session_get_size = iocpt_op_get_session_size,
+	.sym_session_configure = iocpt_op_session_cfg,
+	.sym_session_clear = iocpt_op_session_clear,
+};
+
+/*
+ * Attach the PMD ops table and feature flags to the cryptodev.
+ * The burst handlers are only installed when the device supports
+ * symmetric crypto.
+ */
+int
+iocpt_assign_ops(struct rte_cryptodev *cdev)
+{
+	struct iocpt_dev *dev = cdev->data->dev_private;
+
+	cdev->dev_ops = &iocpt_ops;
+	cdev->feature_flags = dev->features;
+
+	if (dev->features & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
+		cdev->enqueue_burst = iocpt_enqueue_sym;
+		cdev->dequeue_burst = iocpt_dequeue_sym;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ionic/meson.build b/drivers/crypto/ionic/meson.build
index a6e0a1d415..b63428fa9b 100644
--- a/drivers/crypto/ionic/meson.build
+++ b/drivers/crypto/ionic/meson.build
@@ -5,8 +5,10 @@  deps += ['bus_vdev']
 deps += ['common_ionic']
 
 sources = files(
+        'ionic_crypto_caps.c',
         'ionic_crypto_cmds.c',
         'ionic_crypto_main.c',
+        'ionic_crypto_ops.c',
         'ionic_crypto_vdev.c',
 )
 name = 'ionic_crypto'