From patchwork Wed Sep 11 07:52:00 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143938
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 90AEF4595D;
Wed, 11 Sep 2024 09:53:21 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 290C5402D1;
Wed, 11 Sep 2024 09:53:21 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.35])
by mails.dpdk.org (Postfix) with ESMTP id 774AD402CE
for ; Wed, 11 Sep 2024 09:53:19 +0200 (CEST)
Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3Xqv4tVTz5B1Fj
for ; Wed, 11 Sep 2024 15:53:15 +0800 (CST)
Received: from szxlzmapp03.zte.com.cn ([10.5.231.207])
by mse-fl1.zte.com.cn with SMTP id 48B7qxbO069031
for ; Wed, 11 Sep 2024 15:52:59 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:53:02 +0800
X-Zmail-TransId: 3e8166e14c54000-08c6f
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 1/8] zsda: Introduce zsda device drivers
Date: Wed, 11 Sep 2024 15:52:00 +0800
Message-ID: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240910091543.3854422-1-li.hanxiao@zte.com.cn>
References: <20240910091543.3854422-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl1.zte.com.cn 48B7qxbO069031
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14C6B.001/4X3Xqv4tVTz5B1Fj
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Introduce driver support for the ZTE Storage Data Accelerator (ZSDA),
which can help accelerate storage data processing.
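
The admin mailbox introduced here is consumed by the device and queue
code in the following patches. A minimal usage sketch (illustrative
only, not part of this patch; pci_dev and qid are assumed to be in
scope, error handling trimmed; the real caller is zsda_device.c added
later in this series):

    struct zsda_admin_req_qcfg req = {0};
    struct zsda_admin_resp_qcfg resp = {0};

    /* Reset the WQ/CQ flag bytes, then exchange one CRC8-protected
     * request/response pair over the BAR0 admin registers.
     */
    zsda_admin_msg_init(pci_dev);
    req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
    req.qid = qid;
    if (zsda_send_admin_msg(pci_dev, &req, sizeof(req)) ||
        zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp)))
        return -EIO;
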
v6: modify code to fix CI compilation issues
Signed-off-by: Hanxiao Li
---
MAINTAINERS | 4 +
config/rte_config.h | 4 +
drivers/common/zsda/meson.build | 25 +++
drivers/common/zsda/zsda_common.c | 168 +++++++++++++++
drivers/common/zsda/zsda_common.h | 328 ++++++++++++++++++++++++++++++
drivers/common/zsda/zsda_logs.c | 21 ++
drivers/common/zsda/zsda_logs.h | 32 +++
usertools/dpdk-devbind.py | 5 +-
8 files changed, 586 insertions(+), 1 deletion(-)
create mode 100644 drivers/common/zsda/meson.build
create mode 100644 drivers/common/zsda/zsda_common.c
create mode 100644 drivers/common/zsda/zsda_common.h
create mode 100644 drivers/common/zsda/zsda_logs.c
create mode 100644 drivers/common/zsda/zsda_logs.h
--
2.27.0
diff --git a/MAINTAINERS b/MAINTAINERS
index c5a703b5c0..ea245fc61b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1268,6 +1268,10 @@ F: drivers/compress/zlib/
F: doc/guides/compressdevs/zlib.rst
F: doc/guides/compressdevs/features/zlib.ini
+ZTE Storage Data Accelerator
+M: Hanxiao Li
+F: drivers/compress/zsda/
+F: drivers/common/zsda/
DMAdev Drivers
--------------
diff --git a/config/rte_config.h b/config/rte_config.h
index dd7bb0d35b..acfbe5b0f7 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -108,6 +108,10 @@
/****** driver defines ********/
+/* ZSDA device */
+/* Max. number of ZSDA devices which can be attached */
+#define RTE_PMD_ZSDA_MAX_PCI_DEVICES 256
+
/* Packet prefetching in PMDs */
#define RTE_PMD_PACKET_PREFETCH 1
diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
new file mode 100644
index 0000000000..b12ef17476
--- /dev/null
+++ b/drivers/common/zsda/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+
+deps += ['bus_pci', 'compressdev']
+sources += files(
+ 'zsda_common.c',
+ 'zsda_logs.c',
+ 'zsda_device.c',
+ 'zsda_qp.c',
+ )
+
+zsda_compress = true
+zsda_compress_path = 'compress/zsda'
+zsda_compress_relpath = '../../' + zsda_compress_path
+includes += include_directories(zsda_compress_relpath)
+
+if zsda_compress
+    zlib = dependency('zlib', required: false, method: 'pkg-config')
+    foreach f: ['zsda_comp_pmd.c', 'zsda_comp.c']
+        sources += files(join_paths(zsda_compress_relpath, f))
+    endforeach
+    ext_deps += zlib
+endif
diff --git a/drivers/common/zsda/zsda_common.c b/drivers/common/zsda/zsda_common.c
new file mode 100644
index 0000000000..33ea8a42fe
--- /dev/null
+++ b/drivers/common/zsda/zsda_common.c
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_common.h"
+
+#include "bus_pci_driver.h"
+
+#define MAGIC_SEND 0xab
+#define MAGIC_RECV 0xcd
+#define ADMIN_VER 1
+
+static const uint8_t crc8_table[256] = {
+ 0x00, 0x41, 0x13, 0x52, 0x26, 0x67, 0x35, 0x74, 0x4c, 0x0d, 0x5f, 0x1e,
+ 0x6a, 0x2b, 0x79, 0x38, 0x09, 0x48, 0x1a, 0x5b, 0x2f, 0x6e, 0x3c, 0x7d,
+ 0x45, 0x04, 0x56, 0x17, 0x63, 0x22, 0x70, 0x31, 0x12, 0x53, 0x01, 0x40,
+ 0x34, 0x75, 0x27, 0x66, 0x5e, 0x1f, 0x4d, 0x0c, 0x78, 0x39, 0x6b, 0x2a,
+ 0x1b, 0x5a, 0x08, 0x49, 0x3d, 0x7c, 0x2e, 0x6f, 0x57, 0x16, 0x44, 0x05,
+ 0x71, 0x30, 0x62, 0x23, 0x24, 0x65, 0x37, 0x76, 0x02, 0x43, 0x11, 0x50,
+ 0x68, 0x29, 0x7b, 0x3a, 0x4e, 0x0f, 0x5d, 0x1c, 0x2d, 0x6c, 0x3e, 0x7f,
+ 0x0b, 0x4a, 0x18, 0x59, 0x61, 0x20, 0x72, 0x33, 0x47, 0x06, 0x54, 0x15,
+ 0x36, 0x77, 0x25, 0x64, 0x10, 0x51, 0x03, 0x42, 0x7a, 0x3b, 0x69, 0x28,
+ 0x5c, 0x1d, 0x4f, 0x0e, 0x3f, 0x7e, 0x2c, 0x6d, 0x19, 0x58, 0x0a, 0x4b,
+ 0x73, 0x32, 0x60, 0x21, 0x55, 0x14, 0x46, 0x07, 0x48, 0x09, 0x5b, 0x1a,
+ 0x6e, 0x2f, 0x7d, 0x3c, 0x04, 0x45, 0x17, 0x56, 0x22, 0x63, 0x31, 0x70,
+ 0x41, 0x00, 0x52, 0x13, 0x67, 0x26, 0x74, 0x35, 0x0d, 0x4c, 0x1e, 0x5f,
+ 0x2b, 0x6a, 0x38, 0x79, 0x5a, 0x1b, 0x49, 0x08, 0x7c, 0x3d, 0x6f, 0x2e,
+ 0x16, 0x57, 0x05, 0x44, 0x30, 0x71, 0x23, 0x62, 0x53, 0x12, 0x40, 0x01,
+ 0x75, 0x34, 0x66, 0x27, 0x1f, 0x5e, 0x0c, 0x4d, 0x39, 0x78, 0x2a, 0x6b,
+ 0x6c, 0x2d, 0x7f, 0x3e, 0x4a, 0x0b, 0x59, 0x18, 0x20, 0x61, 0x33, 0x72,
+ 0x06, 0x47, 0x15, 0x54, 0x65, 0x24, 0x76, 0x37, 0x43, 0x02, 0x50, 0x11,
+ 0x29, 0x68, 0x3a, 0x7b, 0x0f, 0x4e, 0x1c, 0x5d, 0x7e, 0x3f, 0x6d, 0x2c,
+ 0x58, 0x19, 0x4b, 0x0a, 0x32, 0x73, 0x21, 0x60, 0x14, 0x55, 0x07, 0x46,
+ 0x77, 0x36, 0x64, 0x25, 0x51, 0x10, 0x42, 0x03, 0x3b, 0x7a, 0x28, 0x69,
+ 0x1d, 0x5c, 0x0e, 0x4f};
+
+static uint8_t
+zsda_crc8(const uint8_t *message, const int length)
+{
+ uint8_t crc = 0;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = crc8_table[crc ^ message[i]];
+ return crc;
+}
+
+uint32_t
+zsda_set_reg_8(void *addr, const uint8_t val0, const uint8_t val1,
+ const uint8_t val2, const uint8_t val3)
+{
+ uint8_t val[4];
+ val[0] = val0;
+ val[1] = val1;
+ val[2] = val2;
+ val[3] = val3;
+ ZSDA_CSR_WRITE32(addr, *(uint32_t *)val);
+ return *(uint32_t *)val;
+}
+
+uint8_t
+zsda_get_reg_8(void *addr, const int offset)
+{
+ uint32_t val = ZSDA_CSR_READ32(addr);
+
+ return *(((uint8_t *)&val) + offset);
+}
+
+int
+zsda_admin_msg_init(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+ zsda_set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 0, 0, MAGIC_RECV, 0);
+ zsda_set_reg_8(mmio_base + ZSDA_ADMIN_CQ_BASE7, 0, 0, MAGIC_RECV, 0);
+ return 0;
+}
+
+int
+zsda_send_admin_msg(const struct rte_pci_device *pci_dev, void *req,
+ const uint32_t len)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t wq_flag;
+ uint8_t crc;
+ uint16_t admin_db;
+ uint32_t retry = ZSDA_TIME_NUM;
+ int i;
+ uint16_t db;
+ int repeat = sizeof(struct zsda_admin_req) / sizeof(uint32_t);
+
+ if (len > ADMIN_BUF_DATA_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < repeat; i++) {
+ ZSDA_CSR_WRITE32(((uint32_t *)(mmio_base + ZSDA_ADMIN_WQ) + i),
+ *((uint32_t *)req + i));
+ }
+
+ crc = zsda_crc8((uint8_t *)req, ADMIN_BUF_DATA_LEN);
+ zsda_set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, crc, ADMIN_VER, MAGIC_SEND, 0);
+ rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+ rte_wmb();
+
+ admin_db = ZSDA_CSR_READ32(mmio_base + ZSDA_ADMIN_WQ_TAIL);
+ db = zsda_modulo_32(admin_db, 0x1ff);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_WQ_TAIL, db);
+
+ do {
+ rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+ wq_flag = zsda_get_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 2);
+ if (wq_flag == MAGIC_RECV)
+ break;
+
+ retry--;
+ if (!retry) {
+ ZSDA_LOG(ERR, "wq_flag 0x%X\n", wq_flag);
+ zsda_set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 0, crc,
+ ADMIN_VER, 0);
+ return -EIO;
+ }
+ } while (1);
+
+ return ZSDA_SUCCESS;
+}
+
+int
+zsda_recv_admin_msg(const struct rte_pci_device *pci_dev, void *resp,
+ const uint32_t len)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t cq_flag;
+ uint32_t retry = ZSDA_TIME_NUM;
+ uint8_t crc;
+ uint8_t buf[ADMIN_BUF_TOTAL_LEN] = {0};
+ uint32_t i;
+
+ if (len > ADMIN_BUF_DATA_LEN)
+ return -EINVAL;
+
+ do {
+ rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+
+ cq_flag = zsda_get_reg_8(mmio_base + ZSDA_ADMIN_CQ_BASE7, 2);
+ if (cq_flag == MAGIC_SEND)
+ break;
+
+ retry--;
+ if (!retry)
+ return -EIO;
+ } while (1);
+
+ for (i = 0; i < len; i++)
+ buf[i] = ZSDA_CSR_READ8(
+ (uint8_t *)(mmio_base + ZSDA_ADMIN_CQ + i));
+
+ crc = ZSDA_CSR_READ8(mmio_base + ZSDA_ADMIN_CQ_CRC);
+ rte_rmb();
+ ZSDA_CSR_WRITE8(mmio_base + ZSDA_ADMIN_CQ_FLAG, MAGIC_RECV);
+ if (crc != zsda_crc8(buf, ADMIN_BUF_DATA_LEN)) {
+ ZSDA_LOG(ERR, "[%d] Failed! crc error!", __LINE__);
+ return -EIO;
+ }
+
+ memcpy(resp, buf, len);
+
+ return ZSDA_SUCCESS;
+}
diff --git a/drivers/common/zsda/zsda_common.h b/drivers/common/zsda/zsda_common.h
new file mode 100644
index 0000000000..0dbc9b7d3c
--- /dev/null
+++ b/drivers/common/zsda/zsda_common.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMMON_H_
+#define _ZSDA_COMMON_H_
+
+#include
+
+#include
+#include
+#include
+
+#include "zsda_logs.h"
+
+#define ZSDA_DEV_NAME_MAX_LEN 64
+#define MAX_QPS_ON_FUNCTION 128
+
+#define ADMIN_WQ_BASE_ADDR_0 0x40
+#define ADMIN_WQ_BASE_ADDR_1 0x44
+#define ADMIN_WQ_BASE_ADDR_2 0x48
+#define ADMIN_WQ_BASE_ADDR_3 0x4C
+#define ADMIN_WQ_BASE_ADDR_4 0x50
+#define ADMIN_WQ_BASE_ADDR_5 0x54
+#define ADMIN_WQ_BASE_ADDR_6 0x58
+#define ADMIN_WQ_BASE_ADDR_7 0x5C
+
+#define ADMIN_CQ_BASE_ADDR_0 0x60
+#define ADMIN_CQ_BASE_ADDR_1 0x64
+#define ADMIN_CQ_BASE_ADDR_2 0x68
+#define ADMIN_CQ_BASE_ADDR_3 0x6C
+#define ADMIN_CQ_BASE_ADDR_4 0x70
+#define ADMIN_CQ_BASE_ADDR_5 0x74
+#define ADMIN_CQ_BASE_ADDR_6 0x78
+#define ADMIN_CQ_BASE_ADDR_7 0x7C
+
+#define IO_DB_INITIAL_CONFIG 0x1C00
+
+#define ADMIN_BUF_DATA_LEN 0x1C
+#define ADMIN_BUF_TOTAL_LEN 0x20
+
+#define ZSDA_CSR_VERSION 0x0
+#define ZSDA_ADMIN_WQ 0x40
+#define ZSDA_ADMIN_WQ_BASE7 0x5C
+#define ZSDA_ADMIN_WQ_CRC 0x5C
+#define ZSDA_ADMIN_WQ_VERSION 0x5D
+#define ZSDA_ADMIN_WQ_FLAG 0x5E
+#define ZSDA_ADMIN_CQ 0x60
+#define ZSDA_ADMIN_CQ_BASE7 0x7C
+#define ZSDA_ADMIN_CQ_CRC 0x7C
+#define ZSDA_ADMIN_CQ_VERSION 0x7D
+#define ZSDA_ADMIN_CQ_FLAG 0x7E
+
+#define ZSDA_ADMIN_WQ_TAIL 0x80
+#define ZSDA_ADMIN_CQ_HEAD 0x84
+
+#define ZSDA_ADMIN_Q_START 0x100
+#define ZSDA_ADMIN_Q_STOP 0x100
+#define ZSDA_ADMIN_Q_STOP_RESP 0x104
+#define ZSDA_ADMIN_Q_CLR 0x108
+#define ZSDA_ADMIN_Q_CLR_RESP 0x10C
+
+#define ZSDA_IO_Q_START 0x200
+#define ZSDA_IO_Q_STOP 0x200
+#define ZSDA_IO_Q_STOP_RESP 0x400
+#define ZSDA_IO_Q_CLR 0x600
+#define ZSDA_IO_Q_CLR_RESP 0x800
+
+#define ZSDA_CSR_READ32(addr) rte_read32((addr))
+#define ZSDA_CSR_WRITE32(addr, value) rte_write32((value), (addr))
+#define ZSDA_CSR_READ16(addr) rte_read16((addr))
+#define ZSDA_CSR_WRITE16(addr, value) rte_write16((value), (addr))
+#define ZSDA_CSR_READ8(addr) rte_read8((addr))
+#define ZSDA_CSR_WRITE8(addr, value) rte_write8_relaxed((value), (addr))
+
+#define ZSDA_PCI_NAME zsda
+#define ZSDA_SGL_MAX_NUMBER 512
+#define ZSDA_SGL_FRAGMENT_SIZE 32
+#define NB_DES 512
+
+#define ZSDA_SUCCESS EXIT_SUCCESS
+#define ZSDA_FAILED (-1)
+
+#define E_NULL "Failed! Addr is NULL"
+#define E_CREATE "Failed! Create"
+#define E_FUNC "Failed! Function"
+#define E_START_Q "Failed! START q"
+#define E_MALLOC "Failed! malloc"
+#define E_FREE "Failed! free"
+
+#define E_COMPARE "Failed! compare"
+#define E_START "Failed! start/setup"
+#define E_CLOSE "Failed! stop/close"
+#define E_CONFIG "Failed! config"
+#define E_RESULT "Failed! result wrong"
+
+enum zsda_service_type {
+ ZSDA_SERVICE_COMPRESSION = 0,
+ ZSDA_SERVICE_DECOMPRESSION,
+ ZSDA_SERVICE_INVALID,
+};
+
+#define ZSDA_MAX_SERVICES (ZSDA_SERVICE_INVALID)
+
+#define ZSDA_OPC_COMP_GZIP 0x10 /* Encomp deflate-Gzip */
+#define ZSDA_OPC_COMP_ZLIB 0x11 /* Encomp deflate-Zlib */
+#define ZSDA_OPC_DECOMP_GZIP 0x18 /* Decomp inflate-Gzip */
+#define ZSDA_OPC_DECOMP_ZLIB 0x19 /* Decomp inflate-Zlib */
+#define ZSDA_OPC_INVALID 0xff
+
+#define SET_CYCLE 0xff
+#define SET_HEAD_INTI 0x0
+
+#define ZSDA_Q_START 0x1
+#define ZSDA_Q_STOP 0x0
+#define ZSDA_CLEAR_VALID 0x1
+#define ZSDA_CLEAR_INVALID 0x0
+#define ZSDA_RESP_VALID 0x1
+#define ZSDA_RESP_INVALID 0x0
+
+#define ZSDA_TIME_SLEEP_US 100
+#define ZSDA_TIME_NUM 500
+
+#define ZSDA_MAX_DESC 512
+#define ZSDA_MAX_CYCLE 256
+#define ZSDA_MAX_DEV 256
+#define MAX_NUM_OPS 0x1FF
+
+struct zsda_pci_device;
+
+enum sgl_element_type_wqe {
+ SGL_ELM_TYPE_PHYS_ADDR = 1,
+ SGL_ELM_TYPE_LIST,
+ SGL_ELM_TYPE_LIST_ADDR,
+ SGL_ELM_TYPE_LIST_SGL32,
+};
+
+enum sgl_element_type {
+ SGL_TYPE_PHYS_ADDR = 0,
+ SGL_TYPE_LAST_PHYS_ADDR,
+ SGL_TYPE_NEXT_LIST,
+ SGL_TYPE_EC_LEVEL1_SGL32,
+};
+
+enum zsda_admin_msg_id {
+ /* Version information */
+ ZSDA_ADMIN_VERSION_REQ = 0,
+ ZSDA_ADMIN_VERSION_RESP,
+ /* algo type */
+ ZSDA_ADMIN_QUEUE_CFG_REQ,
+ ZSDA_ADMIN_QUEUE_CFG_RESP,
+ /* get cycle */
+ ZSDA_ADMIN_QUEUE_CYCLE_REQ,
+ ZSDA_ADMIN_QUEUE_CYCLE_RESP,
+ /* set cycle */
+ ZSDA_ADMIN_SET_CYCLE_REQ,
+ ZSDA_ADMIN_SET_CYCLE_RESP,
+
+ ZSDA_MIG_STATE_WARNING,
+ ZSDA_ADMIN_RESERVE,
+ /* set close flr register */
+ ZSDA_FLR_SET_FUNCTION,
+ ZSDA_ADMIN_MSG_VALID,
+ ZSDA_ADMIN_INT_TEST
+};
+
+struct zsda_admin_req {
+ uint16_t msg_type;
+ uint8_t data[26];
+};
+
+struct zsda_admin_resp {
+ uint16_t msg_type;
+ uint8_t data[26];
+};
+
+struct zsda_test_msg {
+ uint32_t msg_type;
+ uint32_t data_in;
+ uint8_t data[20];
+};
+
+struct zsda_admin_req_qcfg {
+ uint16_t msg_type;
+ uint8_t qid;
+ uint8_t data[25];
+};
+
+#pragma pack(1)
+struct qinfo {
+ uint16_t q_type;
+ uint16_t wq_tail;
+ uint16_t wq_head;
+ uint16_t cq_tail;
+ uint16_t cq_head;
+ uint16_t cycle;
+};
+
+struct zsda_admin_resp_qcfg {
+ uint16_t msg_type;
+ struct qinfo qcfg;
+ uint8_t data[14];
+};
+#pragma pack()
+
+enum flr_clr_mask {
+ unmask = 0,
+ mask,
+};
+
+/**< Common struct for scatter-gather list operations */
+struct zsda_buf {
+ uint64_t addr;
+ uint32_t len;
+ uint8_t resrvd[3];
+ uint8_t type;
+} __rte_packed;
+
+struct __rte_cache_aligned zsda_sgl {
+ struct zsda_buf buffers[ZSDA_SGL_MAX_NUMBER];
+};
+
+/* Length of the space reserved for the compression header and tail */
+#define COMP_REMOVE_SPACE_LEN 16
+
+struct zsda_op_cookie {
+ bool used;
+ bool decomp_no_tail;
+ void *op;
+ uint16_t sid;
+ struct zsda_sgl sgl_src;
+ struct zsda_sgl sgl_dst;
+ phys_addr_t sgl_src_phys_addr;
+ phys_addr_t sgl_dst_phys_addr;
+ phys_addr_t comp_head_phys_addr;
+
+ uint8_t comp_head[COMP_REMOVE_SPACE_LEN];
+} __rte_packed;
+
+struct compress_cfg {
+} __rte_packed;
+
+struct zsda_wqe_comp {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+ struct compress_cfg cfg;
+} __rte_packed;
+
+struct zsda_cqe {
+ uint8_t valid; /* cqe_cycle */
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t state;
+ uint8_t result;
+ uint16_t zsda_wq_id;
+ uint32_t tx_real_length;
+ uint16_t err0;
+ uint16_t err1;
+} __rte_packed;
+
+struct zsda_common_stat {
+ /**< Count of all operations enqueued */
+ uint64_t enqueued_count;
+ /**< Count of all operations dequeued */
+ uint64_t dequeued_count;
+
+ /**< Total error count on operations enqueued */
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations dequeued */
+ uint64_t dequeue_err_count;
+};
+
+enum zsda_algo_core {
+ ZSDA_CORE_COMP,
+ ZSDA_CORE_DECOMP,
+ ZSDA_CORE_INVALID,
+};
+
+static inline uint32_t
+zsda_modulo_32(uint32_t data, uint32_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+static inline uint16_t
+zsda_modulo_16(uint16_t data, uint16_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+static inline uint8_t
+zsda_modulo_8(uint8_t data, uint8_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+
+#define CQE_VALID(value) (value & 0x8000)
+#define CQE_ERR0(value) (value & 0xffff)
+#define CQE_ERR1(value) (value & 0x7fff)
+
+/* For situations where err0 is reported but the result is correct */
+#define DECOMP_RIGHT_ERR0_0 0xc710
+#define DECOMP_RIGHT_ERR0_1 0xc727
+#define DECOMP_RIGHT_ERR0_2 0xc729
+#define CQE_ERR0_RIGHT(value) \
+ (value == DECOMP_RIGHT_ERR0_0 || value == DECOMP_RIGHT_ERR0_1 || \
+ value == DECOMP_RIGHT_ERR0_2)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+uint32_t zsda_set_reg_8(void *addr, const uint8_t val0, const uint8_t val1,
+ const uint8_t val2, const uint8_t val3);
+uint8_t zsda_get_reg_8(void *addr, const int offset);
+
+int zsda_admin_msg_init(const struct rte_pci_device *pci_dev);
+int zsda_send_admin_msg(const struct rte_pci_device *pci_dev, void *req,
+ const uint32_t len);
+
+int zsda_recv_admin_msg(const struct rte_pci_device *pci_dev, void *resp,
+ const uint32_t len);
+
+#endif /* _ZSDA_COMMON_H_ */
diff --git a/drivers/common/zsda/zsda_logs.c b/drivers/common/zsda/zsda_logs.c
new file mode 100644
index 0000000000..b8d502a7f0
--- /dev/null
+++ b/drivers/common/zsda/zsda_logs.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include
+#include
+
+#include "zsda_logs.h"
+
+int
+zsda_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (rte_log_can_log(logtype, level))
+ rte_hexdump(rte_log_get_stream(), title, buf, len);
+
+ return 0;
+}
+
+RTE_LOG_REGISTER_DEFAULT(zsda_gen_logtype, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(zsda_dp_logtype, NOTICE);
diff --git a/drivers/common/zsda/zsda_logs.h b/drivers/common/zsda/zsda_logs.h
new file mode 100644
index 0000000000..f6b27e2f2d
--- /dev/null
+++ b/drivers/common/zsda/zsda_logs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_LOGS_H_
+#define _ZSDA_LOGS_H_
+
+extern int zsda_gen_logtype;
+extern int zsda_dp_logtype;
+
+#define ZSDA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_##level, (zsda_gen_logtype & 0xff), \
+ "%s(): [%d] " fmt "\n", __func__, __LINE__, ##args)
+
+#define ZSDA_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_##level, zsda_dp_logtype, "%s(): " fmt "\n", __func__, \
+ ##args)
+
+#define ZSDA_DP_HEXDUMP_LOG(level, title, buf, len) \
+ zsda_hexdump_log(RTE_LOG_##level, zsda_dp_logtype, title, buf, len)
+
+/**
+ * zsda_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message will be sent
+ * to the stream used by the rte_log infrastructure.
+ */
+int zsda_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _ZSDA_LOGS_H_ */
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index b276e8efc8..9b8b015fbb 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -80,6 +80,9 @@
cnxk_ml = {'Class': '08', 'Vendor': '177d', 'Device': 'a092',
'SVendor': None, 'SDevice': None}
+zte_zsda = {'Class': '12', 'Vendor': '1cf2', 'Device': '8050,8051',
+ 'SVendor': None, 'SDevice': None}
+
network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]
crypto_devices = [encryption_class, intel_processor_class]
@@ -93,7 +96,7 @@
ml_devices = [cnxk_ml]
misc_devices = [cnxk_bphy, cnxk_bphy_cgx, cnxk_inl_dev,
intel_ntb_skx, intel_ntb_icx,
- virtio_blk]
+ virtio_blk, zte_zsda]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
From patchwork Wed Sep 11 07:54:24 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143940
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 257E34595D;
Wed, 11 Sep 2024 09:55:41 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 6B94A42FFB;
Wed, 11 Sep 2024 09:55:34 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.35])
by mails.dpdk.org (Postfix) with ESMTP id A8213402CE
for ; Wed, 11 Sep 2024 09:55:30 +0200 (CEST)
Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtT2NRDz5B1Fk
for ; Wed, 11 Sep 2024 15:55:29 +0800 (CST)
Received: from szxlzmapp07.zte.com.cn ([10.5.230.251])
by mse-fl1.zte.com.cn with SMTP id 48B7tGWH072116
for ; Wed, 11 Sep 2024 15:55:16 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:18 +0800
X-Zmail-TransId: 3e8166e14cdf000-08e78
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 2/8] zsda: add support for zsdadev operations
Date: Wed, 11 Sep 2024 15:54:24 +0800
Message-ID: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl1.zte.com.cn 48B7tGWH072116
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF1.000/4X3XtT2NRDz5B1Fk
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add support for zsdadev operations such as dev_start and dev_stop.
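
For reference, the probe path added in zsda_device.c performs roughly
the following sequence (summary of zsda_pci_probe() below, error
handling omitted):

    zsda_pci_dev = zsda_pci_device_allocate(pci_dev);
    zsda_num_used_qps = zsda_get_num_used_qps(pci_dev) + 1;
    zsda_admin_q_start(pci_dev);        /* start the admin queue */
    zsda_queue_clear(pci_dev);          /* clear the in-use I/O queues */
    zsda_unmask_flr(zsda_pci_dev);      /* unmask FLR via an admin message */
    zsda_get_queue_cfg(zsda_pci_dev);   /* read per-queue configuration */
    zsda_comp_dev_create(zsda_pci_dev); /* weak hook, provided by compressdev */
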
Signed-off-by: Hanxiao Li
---
drivers/common/zsda/zsda_device.c | 476 ++++++++++++++++++++++++++++++
drivers/common/zsda/zsda_device.h | 103 +++++++
2 files changed, 579 insertions(+)
create mode 100644 drivers/common/zsda/zsda_device.c
create mode 100644 drivers/common/zsda/zsda_device.h
--
2.27.0
diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
new file mode 100644
index 0000000000..de8894f5a3
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.c
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+
+#include
+#include
+
+#include "zsda_device.h"
+
+/* per-process array of device data */
+struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
+static int zsda_nb_pci_devices;
+uint8_t zsda_num_used_qps;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_zsda_map[] = {
+ {
+ RTE_PCI_DEVICE(0x1cf2, 0x8050),
+ },
+ {
+ RTE_PCI_DEVICE(0x1cf2, 0x8051),
+ },
+ {.device_id = 0},
+};
+
+static int
+zsda_check_write(uint8_t *addr, const uint32_t dst_value)
+{
+ int times = ZSDA_TIME_NUM;
+ uint32_t val;
+
+ val = ZSDA_CSR_READ32(addr);
+
+ while ((val != dst_value) && times--) {
+ val = ZSDA_CSR_READ32(addr);
+ rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+ }
+ if (val == dst_value)
+ return ZSDA_SUCCESS;
+ else
+ return ZSDA_FAILED;
+}
+
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t num_used_qps;
+
+ num_used_qps = ZSDA_CSR_READ8(mmio_base + 0);
+
+ return num_used_qps;
+}
+
+int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, 0);
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+
+ return ret;
+}
+
+int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+ ZSDA_RESP_VALID);
+
+ if (ret)
+ ZSDA_LOG(INFO, "Failed! zsda_admin q stop");
+
+ return ret;
+}
+
+int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+ ZSDA_RESP_VALID);
+
+ if (ret)
+ ZSDA_LOG(INFO, "Failed! zsda_admin q clear");
+
+ return ret;
+}
+
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, const uint8_t id)
+{
+ int ret = 0;
+ uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+ uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+ ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+ return ret;
+}
+
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id;
+ int ret = 0;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_stop_single(mmio_base, id);
+
+ return ret;
+}
+
+static int
+zsda_queue_start_single(uint8_t *mmio_base, const uint8_t id)
+{
+ uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+ return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id;
+ int ret = 0;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_start_single(mmio_base, id);
+
+ return ret;
+}
+
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, const uint8_t id)
+{
+ int ret = 0;
+ uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+ uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_VALID);
+ ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+ ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_INVALID);
+
+ return ret;
+}
+
+int
+zsda_queue_clear(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id;
+ int ret = 0;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_clear_single(mmio_base, id);
+
+ return ret;
+}
+
+static struct zsda_pci_device *
+zsda_pci_get_named_dev(const char *name)
+{
+ unsigned int i;
+
+ if (name == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return NULL;
+ }
+
+ for (i = 0; i < RTE_PMD_ZSDA_MAX_PCI_DEVICES; i++) {
+ if (zsda_devs[i].mz &&
+ (strcmp(((struct zsda_pci_device *)zsda_devs[i].mz->addr)
+ ->name,
+ name) == 0))
+ return (struct zsda_pci_device *)zsda_devs[i].mz->addr;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+zsda_pci_find_free_device_index(void)
+{
+ uint32_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_ZSDA_MAX_PCI_DEVICES; dev_id++)
+ if (zsda_devs[dev_id].mz == NULL)
+ break;
+
+ return dev_id & (ZSDA_MAX_DEV - 1);
+}
+
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev)
+{
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return zsda_pci_get_named_dev(name);
+}
+
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+ uint8_t zsda_dev_id;
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name + strlen(name), (ZSDA_DEV_NAME_MAX_LEN - strlen(name)),
+ "_zsda");
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const struct rte_memzone *mz = rte_memzone_lookup(name);
+
+ if (mz == NULL) {
+ ZSDA_LOG(ERR, "Secondary can't find %s mz", name);
+ return NULL;
+ }
+ zsda_pci_dev = mz->addr;
+ zsda_devs[zsda_pci_dev->zsda_dev_id].mz = mz;
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev = pci_dev;
+ zsda_nb_pci_devices++;
+ return zsda_pci_dev;
+ }
+
+ if (zsda_pci_get_named_dev(name) != NULL) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return NULL;
+ }
+
+ zsda_dev_id = zsda_pci_find_free_device_index();
+
+ if (zsda_dev_id == (RTE_PMD_ZSDA_MAX_PCI_DEVICES - 1)) {
+ ZSDA_LOG(ERR, "Reached maximum number of ZSDA devices");
+ return NULL;
+ }
+
+ unsigned int socket_id = rte_socket_id();
+
+ zsda_devs[zsda_dev_id].mz =
+ rte_memzone_reserve(name, sizeof(struct zsda_pci_device),
+ (int)(socket_id & 0xfff), 0);
+
+ if (zsda_devs[zsda_dev_id].mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return NULL;
+ }
+
+ zsda_pci_dev = zsda_devs[zsda_dev_id].mz->addr;
+ memset(zsda_pci_dev, 0, sizeof(*zsda_pci_dev));
+ strlcpy(zsda_pci_dev->name, name, ZSDA_DEV_NAME_MAX_LEN);
+ zsda_pci_dev->zsda_dev_id = zsda_dev_id;
+ zsda_pci_dev->pci_dev = pci_dev;
+ zsda_devs[zsda_dev_id].pci_dev = pci_dev;
+
+ rte_spinlock_init(&zsda_pci_dev->arb_csr_lock);
+ zsda_nb_pci_devices++;
+
+ return zsda_pci_dev;
+}
+
+static int
+zsda_pci_device_release(const struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+ struct zsda_device_info *inst;
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ snprintf(name + strlen(name),
+ ZSDA_DEV_NAME_MAX_LEN - strlen(name), "_zsda");
+ zsda_pci_dev = zsda_pci_get_named_dev(name);
+ if (zsda_pci_dev != NULL) {
+ inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (zsda_pci_dev->comp_dev != NULL) {
+ ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
+ return -EBUSY;
+ }
+ rte_memzone_free(inst->mz);
+ }
+ memset(inst, 0, sizeof(struct zsda_device_info));
+ zsda_nb_pci_devices--;
+ }
+ return 0;
+}
+
+static int
+zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
+ const struct rte_pci_device *pci_dev)
+{
+ zsda_comp_dev_destroy(zsda_pci_dev);
+
+ return zsda_pci_device_release(pci_dev);
+}
+
+int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+ const uint8_t qid, struct qinfo *qcfg)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ if (qid >= MAX_QPS_ON_FUNCTION) {
+ ZSDA_LOG(ERR, "qid beyond limit!");
+ return ZSDA_FAILED;
+ }
+
+ zsda_admin_msg_init(pci_dev);
+ req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+ req.qid = qid;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ret;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ret;
+ }
+
+ memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+ return ZSDA_SUCCESS;
+}
+
+static int
+zsda_unmask_flr(const struct zsda_pci_device *zsda_pci_dev)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ zsda_admin_msg_init(pci_dev);
+
+ req.msg_type = ZSDA_FLR_SET_FUNCTION;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ret;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ret;
+ }
+
+ return ZSDA_SUCCESS;
+}
+
+static int
+zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ struct zsda_pci_device *zsda_pci_dev;
+
+ zsda_pci_dev = zsda_pci_device_allocate(pci_dev);
+ if (zsda_pci_dev == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENODEV;
+ }
+
+ zsda_num_used_qps = zsda_get_num_used_qps(zsda_pci_dev->pci_dev);
+ zsda_num_used_qps++;
+
+ ret = zsda_admin_q_start(zsda_pci_dev->pci_dev);
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! admin q start");
+ return ret;
+ }
+
+ ret = zsda_queue_clear(zsda_pci_dev->pci_dev);
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! used zsda_io q clear");
+ return ret;
+ }
+
+ ret = zsda_unmask_flr(zsda_pci_dev);
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! flr close");
+ return ret;
+ }
+
+ ret = zsda_get_queue_cfg(zsda_pci_dev);
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! zsda_get_queue_cfg");
+ return ret;
+ }
+
+ ret |= zsda_comp_dev_create(zsda_pci_dev);
+
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! dev create");
+ zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+zsda_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ zsda_pci_dev = zsda_get_zsda_dev_from_pci_dev(pci_dev);
+ if (zsda_pci_dev == NULL)
+ return 0;
+
+ if (zsda_admin_q_clear(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+ ZSDA_LOG(ERR, "Failed! q clear");
+
+ if (zsda_admin_q_stop(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+ ZSDA_LOG(ERR, "Failed! q stop");
+
+ return zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_zsda_pmd = {
+ .id_table = pci_id_zsda_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zsda_pci_probe,
+ .remove = zsda_pci_remove };
+
+RTE_PMD_REGISTER_PCI(ZSDA_PCI_NAME, rte_zsda_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ZSDA_PCI_NAME, pci_id_zsda_map);
+RTE_PMD_REGISTER_KMOD_DEP(ZSDA_PCI_NAME,
+ "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
new file mode 100644
index 0000000000..1b2ad0ce85
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_DEVICE_H_
+#define _ZSDA_DEVICE_H_
+
+#include "bus_pci_driver.h"
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+
+struct zsda_device_info {
+ const struct rte_memzone *mz;
+ /**< mz to store the: struct zsda_pci_device , so it can be
+ * shared across processes
+ */
+
+ struct rte_pci_device *pci_dev;
+
+ struct rte_device comp_rte_dev;
+ /**< This represents the compression subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a compression-specific name
+ */
+};
+
+extern struct zsda_device_info zsda_devs[];
+
+struct zsda_comp_dev_private;
+
+struct zsda_qp_hw_data {
+ bool used;
+
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+
+struct zsda_qp_hw {
+ struct zsda_qp_hw_data data[MAX_QPS_ON_FUNCTION];
+};
+
+/*
+ * This struct holds all the data about a ZSDA pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ * Note: as this data can be shared in a multi-process scenario,
+ * any pointers in it must also point to shared memory.
+ */
+struct zsda_pci_device {
+ /* Data used by all services */
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+ /**< Name of zsda pci device */
+ uint8_t zsda_dev_id;
+ /**< Id of device instance for this zsda pci device */
+
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+
+ struct rte_pci_device *pci_dev;
+
+ /* Data relating to compression service */
+ struct zsda_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+
+ struct zsda_qp_hw zsda_hw_qps[ZSDA_MAX_SERVICES];
+ uint16_t zsda_qp_hw_num[ZSDA_MAX_SERVICES];
+};
+
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev);
+
+__rte_weak int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+ const uint8_t qid, struct qinfo *qcfg);
+
+int zsda_queue_start(const struct rte_pci_device *pci_dev);
+int zsda_queue_stop(const struct rte_pci_device *pci_dev);
+int zsda_queue_clear(const struct rte_pci_device *pci_dev);
+
+int zsda_admin_q_start(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_stop(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_clear(const struct rte_pci_device *pci_dev);
+
+int zsda_set_cycle_head_tail(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_DEVICE_H_ */
From patchwork Wed Sep 11 07:54:25 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143945
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 3E6564595D;
Wed, 11 Sep 2024 09:56:27 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id EADC143287;
Wed, 11 Sep 2024 09:55:45 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40])
by mails.dpdk.org (Postfix) with ESMTP id CF2024325B
for ; Wed, 11 Sep 2024 09:55:38 +0200 (CEST)
Received: from mxct.zte.com.cn (unknown [192.168.251.13])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtZ5P6Rz8RTZW
for ; Wed, 11 Sep 2024 15:55:34 +0800 (CST)
Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxct.zte.com.cn (FangMail) with ESMTPS id 4X3XtS2kTVz501bc
for ; Wed, 11 Sep 2024 15:55:28 +0800 (CST)
Received: from szxlzmapp06.zte.com.cn ([10.5.230.252])
by mse-fl2.zte.com.cn with SMTP id 48B7tG6u069301
for ; Wed, 11 Sep 2024 15:55:16 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:19 +0800
X-Zmail-TransId: 3e8166e14ce6000-08e99
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 3/8] zsda: add support for queue operation
Date: Wed, 11 Sep 2024 15:54:25 +0800
Message-ID: <20240911075447.4074486-2-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl2.zte.com.cn 48B7tG6u069301
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF6.005/4X3XtZ5P6Rz8RTZW
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add queue initialization, release, enqueue, dequeue and other interfaces.
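
A compression PMD built on top of these helpers is expected to set up
and drive a queue pair roughly as below (illustrative sketch only, not
part of this patch; dev_id, queue_pair_id, socket_id, ops and the
service-specific rx/tx/match callbacks are assumed to be provided by
the caller; the hw data comes from zsda_qps_hw_per_service()):

    struct zsda_qp *qp = rte_zmalloc_socket("zsda qp", sizeof(*qp),
                                            RTE_CACHE_LINE_SIZE, socket_id);
    struct zsda_qp_config conf = {
        .service_type   = ZSDA_SERVICE_COMPRESSION,
        .hw             = &qp_hw->data[queue_pair_id],
        .nb_descriptors = ZSDA_MAX_DESC,
        .cookie_size    = sizeof(struct zsda_op_cookie),
        .socket_id      = socket_id,
        .service_str    = "comp",
    };

    /* Create tx/rx rings, the cookie pool and the per-cookie SGL
     * physical addresses, then move ops through the hardware queue.
     */
    ret = zsda_common_setup_qp(dev_id, &qp, queue_pair_id, &conf);
    nb_sent = zsda_enqueue_op_burst(qp, (void **)ops, nb_ops);
    nb_done = zsda_dequeue_op_burst(qp, (void **)ops, nb_ops);
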
Signed-off-by: Hanxiao Li
---
drivers/common/zsda/zsda_qp.c | 720 ++++++++++++++++++++++++++++++++++
drivers/common/zsda/zsda_qp.h | 160 ++++++++
2 files changed, 880 insertions(+)
create mode 100644 drivers/common/zsda/zsda_qp.c
create mode 100644 drivers/common/zsda/zsda_qp.h
--
2.27.0
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 0000000000..f2dfe43b2e
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,720 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include
+
+#include
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+struct ring_size {
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+ [ZSDA_SERVICE_COMPRESSION] = {32, 16},
+ [ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+};
+
+static void
+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
+ const uint8_t qid)
+{
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+ ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+ SET_HEAD_INTI);
+}
+
+int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint8_t i;
+ uint32_t index;
+ enum zsda_service_type type;
+ struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+ struct qinfo qcfg;
+ int ret = 0;
+
+ for (i = 0; i < zsda_num_used_qps; i++) {
+ zsda_set_queue_head_tail(zsda_pci_dev, i);
+ ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! get queue cfg");
+ return ret;
+ }
+ type = qcfg.q_type;
+ if (type >= ZSDA_SERVICE_INVALID)
+ continue;
+
+ index = zsda_pci_dev->zsda_qp_hw_num[type];
+ zsda_hw_qps[type].data[index].used = true;
+ zsda_hw_qps[type].data[index].tx_ring_num = i;
+ zsda_hw_qps[type].data[index].rx_ring_num = i;
+ zsda_hw_qps[type].data[index].tx_msg_size =
+ zsda_qp_hw_ring_size[type].tx_msg_size;
+ zsda_hw_qps[type].data[index].rx_msg_size =
+ zsda_qp_hw_ring_size[type].rx_msg_size;
+
+ zsda_pci_dev->zsda_qp_hw_num[type]++;
+ }
+
+ return ret;
+}
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+ const enum zsda_service_type service)
+{
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ if (service < ZSDA_SERVICE_INVALID)
+ qp_hw = &(zsda_pci_dev->zsda_hw_qps[service]);
+
+ return qp_hw;
+}
+
+uint16_t
+zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+ const enum zsda_service_type service)
+{
+ uint16_t qp_hw_num = 0;
+
+ if (service < ZSDA_SERVICE_INVALID)
+ qp_hw_num = zsda_pci_dev->zsda_qp_hw_num[service];
+
+ return qp_hw_num;
+}
+
+uint16_t
+zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
+{
+ uint16_t comp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+ uint16_t decomp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+ uint16_t min = 0;
+
+ if ((comp == MAX_QPS_ON_FUNCTION) ||
+ (decomp == MAX_QPS_ON_FUNCTION))
+ min = MAX_QPS_ON_FUNCTION;
+ else
+ min = (comp < decomp) ? comp : decomp;
+ if (min == 0)
+ return MAX_QPS_ON_FUNCTION;
+ return min;
+}
+
+
+void
+zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+ struct zsda_common_stat *stats)
+{
+ enum zsda_service_type type;
+ uint32_t i;
+ struct zsda_qp *qp;
+
+ if ((stats == NULL) || (queue_pairs == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return;
+ }
+
+ for (i = 0; i < nb_queue_pairs; i++) {
+ qp = (struct zsda_qp *)queue_pairs[i];
+
+ if (qp == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ break;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (qp->srv[type].used) {
+ stats->enqueued_count +=
+ qp->srv[type].stats.enqueued_count;
+ stats->dequeued_count +=
+ qp->srv[type].stats.dequeued_count;
+ stats->enqueue_err_count +=
+ qp->srv[type].stats.enqueue_err_count;
+ stats->dequeue_err_count +=
+ qp->srv[type].stats.dequeue_err_count;
+ }
+ }
+ }
+}
+
+void
+zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs)
+{
+ enum zsda_service_type type;
+ uint32_t i;
+ struct zsda_qp *qp;
+
+ if (queue_pairs == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return;
+ }
+
+ for (i = 0; i < nb_queue_pairs; i++) {
+ qp = (struct zsda_qp *)queue_pairs[i];
+
+ if (qp == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ break;
+ }
+ for (type = 0; type < ZSDA_MAX_SERVICES; type++) {
+ if (qp->srv[type].used)
+ memset(&(qp->srv[type].stats), 0,
+ sizeof(struct zsda_common_stat));
+ }
+ }
+}
+
+static const struct rte_memzone *
+zsda_queue_dma_zone_reserve(const char *queue_name, const unsigned int queue_size,
+ const unsigned int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == (SOCKET_ID_ANY & 0xffff)) ||
+ (socket_id == (mz->socket_id & 0xffff)))) {
+ ZSDA_LOG(DEBUG,
+ "re-use memzone already allocated for %s",
+ queue_name);
+ return mz;
+ }
+ ZSDA_LOG(ERR, E_MALLOC);
+ return NULL;
+ }
+
+ mz = rte_memzone_reserve_aligned(queue_name, queue_size,
+ (int)(socket_id & 0xfff),
+ RTE_MEMZONE_IOVA_CONTIG, queue_size);
+
+ return mz;
+}
+
+static int
+zsda_queue_create(const uint32_t dev_id, struct zsda_queue *queue,
+ const struct zsda_qp_config *qp_conf, const uint8_t dir)
+{
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ struct qinfo qcfg = {0};
+
+ uint16_t desc_size = ((dir == RING_DIR_TX) ? qp_conf->hw->tx_msg_size
+ : qp_conf->hw->rx_msg_size);
+ unsigned int queue_size_bytes = qp_conf->nb_descriptors * desc_size;
+
+ queue->hw_queue_number =
+ ((dir == RING_DIR_TX) ? qp_conf->hw->tx_ring_num
+ : qp_conf->hw->rx_ring_num);
+
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ struct zsda_pci_device *zsda_dev =
+ (struct zsda_pci_device *)zsda_devs[dev_id].mz->addr;
+
+ zsda_get_queue_cfg_by_id(zsda_dev, queue->hw_queue_number, &qcfg);
+
+ if (dir == RING_DIR_TX)
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+ qp_conf->service_str, "qptxmem",
+ queue->hw_queue_number);
+ else
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+ qp_conf->service_str, "qprxmem",
+ queue->hw_queue_number);
+
+ qp_mz = zsda_queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ rte_socket_id());
+ if (qp_mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (uint8_t *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ queue->modulo_mask = MAX_NUM_OPS;
+ queue->msg_size = desc_size;
+
+ queue->head = (dir == RING_DIR_TX) ? qcfg.wq_head : qcfg.cq_head;
+ queue->tail = (dir == RING_DIR_TX) ? qcfg.wq_tail : qcfg.cq_tail;
+
+ if ((queue->head == 0) && (queue->tail == 0))
+ qcfg.cycle += 1;
+
+ queue->valid = qcfg.cycle & (ZSDA_MAX_CYCLE - 1);
+ queue->queue_size = ZSDA_MAX_DESC;
+ queue->cycle_size = ZSDA_MAX_CYCLE;
+ queue->io_addr = pci_dev->mem_resource[0].addr;
+
+ memset(queue->base_addr, 0x0, queue_size_bytes);
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ if (dir == RING_DIR_TX)
+ ZSDA_CSR_WQ_RING_BASE(io_addr, queue->hw_queue_number,
+ queue->base_phys_addr);
+ else
+ ZSDA_CSR_CQ_RING_BASE(io_addr, queue->hw_queue_number,
+ queue->base_phys_addr);
+
+ return 0;
+}
+
+static void
+zsda_queue_delete(const struct zsda_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status;
+
+ if (queue == NULL) {
+ ZSDA_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ memset(queue->base_addr, 0x0,
+ (uint16_t)(queue->queue_size * queue->msg_size));
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ ZSDA_LOG(ERR, E_FREE);
+ } else
+ ZSDA_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);
+}
+
+static int
+zsda_cookie_init(const uint32_t dev_id, struct zsda_qp **qp_addr,
+ const uint16_t queue_pair_id,
+ const struct zsda_qp_config *zsda_qp_conf)
+{
+ struct zsda_qp *qp = *qp_addr;
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+ enum zsda_service_type type = zsda_qp_conf->service_type;
+
+ if (zsda_qp_conf->nb_descriptors != ZSDA_MAX_DESC)
+ ZSDA_LOG(ERR, "Can't create qp for %u descriptors",
+ zsda_qp_conf->nb_descriptors);
+
+ qp->srv[type].nb_descriptors = zsda_qp_conf->nb_descriptors;
+
+ qp->srv[type].op_cookies = rte_zmalloc_socket(
+ "zsda PMD op cookie pointer",
+ zsda_qp_conf->nb_descriptors *
+ sizeof(*qp->srv[type].op_cookies),
+ RTE_CACHE_LINE_SIZE, zsda_qp_conf->socket_id);
+
+ if (qp->srv[type].op_cookies == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cks_%s_qp%hu",
+ pci_dev->driver->driver.name, dev_id,
+ zsda_qp_conf->service_str, queue_pair_id);
+
+ qp->srv[type].op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->srv[type].op_cookie_pool == NULL)
+ qp->srv[type].op_cookie_pool = rte_mempool_create(
+ op_cookie_pool_name, qp->srv[type].nb_descriptors,
+ zsda_qp_conf->cookie_size, 64, 0, NULL, NULL, NULL,
+ NULL, (int)(rte_socket_id() & 0xfff), 0);
+ if (!qp->srv[type].op_cookie_pool) {
+ ZSDA_LOG(ERR, E_CREATE);
+ goto exit;
+ }
+
+ for (i = 0; i < qp->srv[type].nb_descriptors; i++) {
+ if (rte_mempool_get(qp->srv[type].op_cookie_pool,
+ &qp->srv[type].op_cookies[i])) {
+ ZSDA_LOG(ERR, "ZSDA PMD Cannot get op_cookie");
+ goto exit;
+ }
+ memset(qp->srv[type].op_cookies[i], 0,
+ zsda_qp_conf->cookie_size);
+ }
+ return 0;
+
+exit:
+ if (qp->srv[type].op_cookie_pool)
+ rte_mempool_free(qp->srv[type].op_cookie_pool);
+ rte_free(qp->srv[type].op_cookies);
+
+ return -EFAULT;
+}
+
+int
+zsda_queue_pair_setup(const uint32_t dev_id, struct zsda_qp **qp_addr,
+ const uint16_t queue_pair_id,
+ const struct zsda_qp_config *zsda_qp_conf)
+{
+ struct zsda_qp *qp = *qp_addr;
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ int ret = 0;
+ enum zsda_service_type type = zsda_qp_conf->service_type;
+
+ if (type >= ZSDA_SERVICE_INVALID) {
+ ZSDA_LOG(ERR, "Failed! service type");
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -EINVAL;
+ }
+
+ if (zsda_queue_create(dev_id, &(qp->srv[type].tx_q), zsda_qp_conf,
+ RING_DIR_TX) != 0) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return -EFAULT;
+ }
+
+ if (zsda_queue_create(dev_id, &(qp->srv[type].rx_q), zsda_qp_conf,
+ RING_DIR_RX) != 0) {
+ ZSDA_LOG(ERR, E_CREATE);
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ return -EFAULT;
+ }
+
+ ret = zsda_cookie_init(dev_id, qp_addr, queue_pair_id, zsda_qp_conf);
+ if (ret) {
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ zsda_queue_delete(&(qp->srv[type].rx_q));
+ qp->srv[type].used = false;
+ return ret;
+ }
+ qp->srv[type].used = true;
+ return ret;
+}
+
+int
+zsda_queue_pair_release(struct zsda_qp **qp_addr)
+{
+ struct zsda_qp *qp = *qp_addr;
+ uint32_t i;
+ enum zsda_service_type type;
+
+ if (qp == NULL) {
+ ZSDA_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (!qp->srv[type].used)
+ continue;
+
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ zsda_queue_delete(&(qp->srv[type].rx_q));
+ qp->srv[type].used = false;
+ for (i = 0; i < qp->srv[type].nb_descriptors; i++)
+ rte_mempool_put(qp->srv[type].op_cookie_pool,
+ qp->srv[type].op_cookies[i]);
+
+ if (qp->srv[type].op_cookie_pool)
+ rte_mempool_free(qp->srv[type].op_cookie_pool);
+
+ rte_free(qp->srv[type].op_cookies);
+ }
+
+ rte_free(qp);
+ *qp_addr = NULL;
+
+ return 0;
+}
+
+int
+zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset, struct zsda_sgl *sgl,
+ const phys_addr_t sgl_phy_addr, uint32_t remain_len,
+ struct comp_head_info *comp_head_info)
+{
+ uint32_t nr;
+ uint16_t put_in_len;
+ bool head_set = false;
+
+ for (nr = 0; (buf && (nr < (ZSDA_SGL_MAX_NUMBER - 1)));) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ buf = buf->next;
+ continue;
+ }
+ memset(&(sgl->buffers[nr]), 0, sizeof(struct zsda_buf));
+ if ((nr > 0) && (((nr + 1) % ZSDA_SGL_FRAGMENT_SIZE) == 0) &&
+ (buf->next != NULL)) {
+ sgl->buffers[nr].len = SGL_TYPE_PHYS_ADDR;
+ sgl->buffers[nr].addr =
+ sgl_phy_addr +
+ ((nr + 1) * sizeof(struct zsda_buf));
+ sgl->buffers[nr].type = SGL_TYPE_NEXT_LIST;
+ ++nr;
+ continue;
+ }
+ if (comp_head_info && !head_set) {
+ sgl->buffers[nr].len = comp_head_info->head_len;
+ sgl->buffers[nr].addr = comp_head_info->head_phys_addr;
+ sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;
+ ++nr;
+ head_set = true;
+ remain_len -= comp_head_info->head_len;
+ continue;
+ } else {
+ put_in_len = rte_pktmbuf_data_len(buf) - (offset & 0xffff);
+ if (remain_len <= put_in_len)
+ put_in_len = remain_len;
+ remain_len -= put_in_len;
+
+ sgl->buffers[nr].len = put_in_len;
+ sgl->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+ sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;
+ }
+ offset = 0;
+ ++nr;
+ buf = buf->next;
+
+ if (remain_len == 0)
+ break;
+ }
+
+ if (nr == 0) {
+ ZSDA_LOG(ERR, "In fill_sgl, nr == 0");
+ return ZSDA_FAILED;
+ }
+
+ sgl->buffers[nr - 1].type = SGL_TYPE_LAST_PHYS_ADDR;
+
+ if (buf) {
+ if (unlikely(buf->next)) {
+ if (nr == (ZSDA_SGL_MAX_NUMBER - 1)) {
+ ZSDA_LOG(ERR, "ERR! segs size (%u)",
+ (ZSDA_SGL_MAX_NUMBER));
+ return -EINVAL;
+ }
+ }
+ }
+
+ return ZSDA_SUCCESS;
+}
+
+int
+zsda_get_sgl_num(const struct zsda_sgl *sgl)
+{
+ int sgl_num = 0;
+
+ while (sgl->buffers[sgl_num].type != 1) {
+ sgl_num++;
+ if (sgl_num >= ZSDA_SGL_MAX_NUMBER)
+ return ZSDA_FAILED;
+ }
+ sgl_num++;
+ return sgl_num;
+}
+
+static int
+zsda_find_next_free_cookie(const struct zsda_queue *queue, void **op_cookie,
+ uint16_t *idx)
+{
+ uint16_t old_tail = queue->tail;
+ uint16_t tail = queue->tail;
+ struct zsda_op_cookie *cookie;
+
+ do {
+ cookie = (struct zsda_op_cookie *)op_cookie[tail];
+ if (!cookie->used) {
+ *idx = tail & (queue->queue_size - 1);
+ return 0;
+ }
+ tail = zsda_modulo_16(tail + 1, queue->modulo_mask);
+ } while (old_tail != tail);
+
+ return -EINVAL;
+}
+
+static int
+zsda_enqueue(void *op, struct zsda_qp *qp)
+{
+ uint16_t new_tail;
+ enum zsda_service_type type;
+ void **op_cookie;
+ int ret = 0;
+ struct zsda_queue *queue;
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (qp->srv[type].used) {
+ if (!qp->srv[type].match(op))
+ continue;
+ queue = &qp->srv[type].tx_q;
+ op_cookie = qp->srv[type].op_cookies;
+
+ if (zsda_find_next_free_cookie(queue, op_cookie,
+ &new_tail)) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = qp->srv[type].tx_cb(op, queue, op_cookie,
+ new_tail);
+ if (ret) {
+ qp->srv[type].stats.enqueue_err_count++;
+ ZSDA_LOG(ERR, "Failed! config wqe");
+ break;
+ }
+ qp->srv[type].stats.enqueued_count++;
+
+ queue->tail = zsda_modulo_16(new_tail + 1,
+ queue->queue_size - 1);
+
+ if (new_tail > queue->tail)
+ queue->valid =
+ zsda_modulo_8(queue->valid + 1,
+ (uint8_t)(queue->cycle_size - 1));
+
+ queue->pushed_wqe++;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void
+zsda_tx_write_tail(struct zsda_queue *queue)
+{
+ if (queue->pushed_wqe)
+ WRITE_CSR_WQ_TAIL(queue->io_addr, queue->hw_queue_number,
+ queue->tail);
+
+ queue->pushed_wqe = 0;
+}
+
+uint16_t
+zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops)
+{
+ int ret = 0;
+ enum zsda_service_type type;
+ uint16_t i;
+ uint16_t nb_send = 0;
+ void *op;
+
+ if (nb_ops > ZSDA_MAX_DESC) {
+ ZSDA_LOG(ERR, "Enqueue number bigger than %d", ZSDA_MAX_DESC);
+ return 0;
+ }
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+ ret = zsda_enqueue(op, qp);
+ if (ret < 0)
+ break;
+ nb_send++;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++)
+ if (qp->srv[type].used)
+ zsda_tx_write_tail(&qp->srv[type].tx_q);
+
+ return nb_send;
+}
+
+static void
+zsda_dequeue(struct qp_srv *srv, void **ops, const uint16_t nb_ops, uint16_t *nb)
+{
+ uint16_t head;
+ struct zsda_cqe *cqe;
+ struct zsda_queue *queue = &srv->rx_q;
+ struct zsda_op_cookie *cookie;
+ head = queue->head;
+
+ while (*nb < nb_ops) {
+ cqe = (struct zsda_cqe *)((uint8_t *)queue->base_addr + head * queue->msg_size);
+
+ if (!CQE_VALID(cqe->err1))
+ break;
+ cookie = (struct zsda_op_cookie *)srv->op_cookies[cqe->sid];
+
+ if (cookie->decomp_no_tail && CQE_ERR0_RIGHT(cqe->err0))
+ cqe->err0 = 0x0000;
+
+ if (CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)) {
+ ZSDA_LOG(ERR,
+ "ERR! Cqe, opcode 0x%x, sid 0x%x, "
+ "tx_real_length 0x%x, err0 0x%x, err1 0x%x",
+ cqe->op_code, cqe->sid, cqe->tx_real_length,
+ cqe->err0, cqe->err1);
+ srv->stats.dequeue_err_count++;
+ } else
+ srv->stats.dequeued_count++;
+
+ ops[*nb] = cookie->op;
+ srv->rx_cb(cookie, cqe);
+ (*nb)++;
+ cookie->used = false;
+
+ head = zsda_modulo_16(head + 1, queue->modulo_mask);
+ queue->head = head;
+ WRITE_CSR_CQ_HEAD(queue->io_addr, queue->hw_queue_number, head);
+ memset(cqe, 0x0, sizeof(struct zsda_cqe));
+ }
+}
+
+uint16_t
+zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops)
+{
+ uint16_t nb = 0;
+ uint32_t type;
+ struct qp_srv *srv;
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (!qp->srv[type].used)
+ continue;
+ srv = &qp->srv[type];
+ zsda_dequeue(srv, ops, nb_ops, &nb);
+ if (nb >= nb_ops)
+ return nb_ops;
+ }
+ return nb;
+}
+
+int
+zsda_common_setup_qp(uint32_t zsda_dev_id, struct zsda_qp **qp_addr,
+ const uint16_t queue_pair_id, const struct zsda_qp_config *conf)
+{
+ uint32_t i;
+ int ret = 0;
+ struct zsda_qp *qp;
+ rte_iova_t cookie_phys_addr;
+
+ ret = zsda_queue_pair_setup(zsda_dev_id, qp_addr, queue_pair_id, conf);
+ if (ret)
+ return ret;
+
+ qp = (struct zsda_qp *)*qp_addr;
+
+ for (i = 0; i < qp->srv[conf->service_type].nb_descriptors; i++) {
+ struct zsda_op_cookie *cookie =
+ qp->srv[conf->service_type].op_cookies[i];
+ cookie_phys_addr = rte_mempool_virt2iova(cookie);
+
+ cookie->comp_head_phys_addr = cookie_phys_addr +
+ offsetof(struct zsda_op_cookie, comp_head);
+
+ cookie->sgl_src_phys_addr = cookie_phys_addr +
+ offsetof(struct zsda_op_cookie, sgl_src);
+
+ cookie->sgl_dst_phys_addr = cookie_phys_addr +
+ offsetof(struct zsda_op_cookie, sgl_dst);
+ }
+ return ret;
+}
diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h
new file mode 100644
index 0000000000..11943a9be4
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_QP_H_
+#define _ZSDA_QP_H_
+
+#define WQ_CSR_LBASE 0x1000
+#define WQ_CSR_UBASE 0x1004
+#define CQ_CSR_LBASE 0x1400
+#define CQ_CSR_UBASE 0x1404
+#define WQ_TAIL 0x1800
+#define CQ_HEAD 0x1804
+
+/**
+ * Structure associated with each queue.
+ */
+struct zsda_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ uint8_t *io_addr;
+ uint8_t *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint16_t head; /* Shadow copy of the head */
+ uint16_t tail; /* Shadow copy of the tail */
+ uint16_t modulo_mask;
+ uint16_t msg_size;
+ uint16_t queue_size;
+ uint16_t cycle_size;
+ uint16_t pushed_wqe;
+
+ uint8_t hw_queue_number;
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+
+ uint8_t valid;
+ uint16_t sid;
+};
+
+typedef void (*rx_callback)(void *cookie_in, const struct zsda_cqe *cqe);
+typedef int (*tx_callback)(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail);
+typedef int (*srv_match)(const void *op_in);
+
+struct qp_srv {
+ bool used;
+ struct zsda_queue tx_q;
+ struct zsda_queue rx_q;
+ rx_callback rx_cb;
+ tx_callback tx_cb;
+ srv_match match;
+ struct zsda_common_stat stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint16_t nb_descriptors;
+};
+
+struct zsda_qp {
+ struct qp_srv srv[ZSDA_MAX_SERVICES];
+
+ uint16_t max_inflights;
+ uint16_t min_enq_burst_threshold;
+ void *mmap_bar_addr;
+};
+
+struct zsda_qp_config {
+ enum zsda_service_type service_type;
+ const struct zsda_qp_hw_data *hw;
+ uint16_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ const char *service_str;
+};
+
+struct comp_head_info {
+ uint32_t head_len;
+ phys_addr_t head_phys_addr;
+};
+
+extern uint8_t zsda_num_used_qps;
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+ const enum zsda_service_type service);
+uint16_t zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+ const enum zsda_service_type service);
+
+uint16_t zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev);
+uint16_t zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+/* CSR write macro */
+#define ZSDA_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+#define ZSDA_CSR_WC_WR(csrAddr, csrOffset, val) \
+ rte_write32_wc(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ZSDA_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ZSDA_CSR_WQ_RING_BASE(csr_base_addr, ring, value) \
+ do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_LBASE, \
+ l_base); \
+ ZSDA_LOG(INFO, "l_basg - offset:0x%x, value:0x%x", \
+ ((ring << 3) + WQ_CSR_LBASE), l_base); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_UBASE, \
+ u_base); \
+ ZSDA_LOG(INFO, "h_base - offset:0x%x, value:0x%x", \
+ ((ring << 3) + WQ_CSR_UBASE), u_base); \
+ } while (0)
+
+#define ZSDA_CSR_CQ_RING_BASE(csr_base_addr, ring, value) \
+ do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_LBASE, \
+ l_base); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_UBASE, \
+ u_base); \
+ } while (0)
+
+#define READ_CSR_WQ_HEAD(csr_base_addr, ring) \
+ ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))
+#define WRITE_CSR_WQ_TAIL(csr_base_addr, ring, value) \
+ ZSDA_CSR_WC_WR(csr_base_addr, WQ_TAIL + (ring << 3), value)
+#define READ_CSR_CQ_HEAD(csr_base_addr, ring) \
+ ZSDA_CSR_RD(csr_base_addr, CQ_HEAD + (ring << 3))
+#define WRITE_CSR_CQ_HEAD(csr_base_addr, ring, value) \
+ ZSDA_CSR_WC_WR(csr_base_addr, CQ_HEAD + (ring << 3), value)
+
+uint16_t zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);
+uint16_t zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);
+
+int zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,
+ const uint16_t queue_pair_id,
+ const struct zsda_qp_config *zsda_qp_conf);
+
+int zsda_queue_pair_release(struct zsda_qp **qp_addr);
+int zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset,
+ struct zsda_sgl *sgl, const phys_addr_t sgl_phy_addr,
+ uint32_t remain_len, struct comp_head_info *comp_head_info);
+
+int zsda_get_sgl_num(const struct zsda_sgl *sgl);
+int zsda_sgl_opt_addr_lost(struct rte_mbuf *mbuf);
+
+int zsda_common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,
+ const uint16_t queue_pair_id,
+ const struct zsda_qp_config *conf);
+
+void zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+ struct zsda_common_stat *stats);
+void zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs);
+
+#endif /* _ZSDA_QP_H_ */
From patchwork Wed Sep 11 07:54:26 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143944
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 3A6AE4595D;
Wed, 11 Sep 2024 09:56:19 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 971A44325A;
Wed, 11 Sep 2024 09:55:44 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40])
by mails.dpdk.org (Postfix) with ESMTP id A85BD43251
for ; Wed, 11 Sep 2024 09:55:38 +0200 (CEST)
Received: from mxct.zte.com.cn (unknown [192.168.251.13])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtZ4vH8z8RTZV
for ; Wed, 11 Sep 2024 15:55:34 +0800 (CST)
Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxct.zte.com.cn (FangMail) with ESMTPS id 4X3XtS2jpDz501bZ
for ; Wed, 11 Sep 2024 15:55:28 +0800 (CST)
Received: from szxlzmapp06.zte.com.cn ([10.5.230.252])
by mse-fl1.zte.com.cn with SMTP id 48B7tGL5072125
for ; Wed, 11 Sep 2024 15:55:16 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:19 +0800
X-Zmail-TransId: 3e8166e14ce7000-08ea8
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 4/8] zsda: add zsda compressdev driver and interface
Date: Wed, 11 Sep 2024 15:54:26 +0800
Message-ID: <20240911075447.4074486-3-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl1.zte.com.cn 48B7tGL5072125
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF6.004/4X3XtZ4vH8z8RTZV
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add the ZSDA compressdev driver along with its enqueue and dequeue interfaces.
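For reviewers, a minimal usage sketch of how an application is expected to
reach the enqueue/dequeue paths added here, going through the generic
compressdev burst API; the helper name, the dev_id/qp_id arguments and the
pre-built op and private xform are illustrative assumptions, not part of
this patch:

#include <rte_compressdev.h>
#include <rte_mbuf.h>

/* Illustrative only: submit one stateless op and poll for completion.
 * Device, queue pair, mbufs and the private xform are assumed to be
 * set up already.
 */
static int
zsda_example_compress_one(uint8_t dev_id, uint16_t qp_id,
                          struct rte_comp_op *op, void *priv_xform)
{
        op->op_type = RTE_COMP_OP_STATELESS;
        op->private_xform = priv_xform;
        op->src.offset = 0;
        op->src.length = rte_pktmbuf_pkt_len(op->m_src);
        op->dst.offset = 0;

        if (rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1) != 1)
                return -1;      /* queue full or op rejected */

        while (rte_compressdev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
                ;               /* poll until the hardware completes */

        return op->status == RTE_COMP_OP_STATUS_SUCCESS ? 0 : -1;
}

The burst calls above map onto the zsda_comp_pmd_enqueue_op_burst() and
zsda_comp_pmd_dequeue_op_burst() entry points registered by this patch.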
Signed-off-by: Hanxiao Li
---
drivers/common/zsda/meson.build | 2 +-
drivers/compress/zsda/zsda_comp.c | 358 ++++++++++++++++++++
drivers/compress/zsda/zsda_comp.h | 27 ++
drivers/compress/zsda/zsda_comp_pmd.c | 453 ++++++++++++++++++++++++++
drivers/compress/zsda/zsda_comp_pmd.h | 39 +++
drivers/meson.build | 1 +
6 files changed, 879 insertions(+), 1 deletion(-)
create mode 100644 drivers/compress/zsda/zsda_comp.c
create mode 100644 drivers/compress/zsda/zsda_comp.h
create mode 100644 drivers/compress/zsda/zsda_comp_pmd.c
create mode 100644 drivers/compress/zsda/zsda_comp_pmd.h
--
2.27.0
diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index b12ef17476..e2a214bbe8 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -17,7 +17,7 @@ zsda_compress_relpath = '../../' + zsda_compress_path
includes += include_directories(zsda_compress_relpath)
if zsda_compress
-zlib = dependency('zlib', required: false, method: 'pkg-config')
+ zlib = dependency('zlib', required: false, method: 'pkg-config')
foreach f: ['zsda_comp_pmd.c', 'zsda_comp.c']
sources += files(join_paths(zsda_compress_relpath, f))
endforeach
diff --git a/drivers/compress/zsda/zsda_comp.c b/drivers/compress/zsda/zsda_comp.c
new file mode 100644
index 0000000000..0bf0736be5
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_comp.h"
+
+#define ZLIB_HEADER_SIZE 2
+#define ZLIB_TRAILER_SIZE 4
+#define GZIP_HEADER_SIZE 10
+#define GZIP_TRAILER_SIZE 8
+#define CHECKSUM_SIZE 4
+
+static uint32_t zsda_read_chksum(uint8_t *data_addr, uint8_t op_code,
+ uint32_t produced);
+
+static uint32_t
+zsda_crc32(const unsigned char *data, size_t length)
+{
+ uint32_t crc32_table[256];
+ uint32_t polynomial = 0xEDB88320;
+ uint32_t crc, i, j;
+ uint8_t index;
+
+ for (i = 0; i < 256; i++) {
+ crc = i;
+ for (j = 0; j < 8; j++) {
+ if (crc & 1)
+ crc = (crc >> 1) ^ polynomial;
+ else
+ crc >>= 1;
+ }
+ crc32_table[i] = crc;
+ }
+
+ crc = 0xFFFFFFFF;
+ for (i = 0; i < length; i++) {
+ index = (crc ^ data[i]) & 0xFF;
+ crc = (crc >> 8) ^ crc32_table[index];
+ }
+ return crc ^ 0xFFFFFFFF;
+}
+
+#define MOD_ADLER 65521
+static uint32_t
+zsda_adler32(const uint8_t *buf, uint32_t len)
+{
+ uint32_t s1 = 1;
+ uint32_t s2 = 0;
+
+ for (uint32_t i = 0; i < len; i++) {
+ s1 = (s1 + buf[i]) % MOD_ADLER;
+ s2 = (s2 + s1) % MOD_ADLER;
+ }
+
+ return (s2 << 16) | s1;
+}
+
+int
+zsda_comp_match(const void *op_in)
+{
+ const struct rte_comp_op *op = (const struct rte_comp_op *)op_in;
+ const struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ if (op->op_type != RTE_COMP_OP_STATELESS)
+ return 0;
+
+ if (xform->type != RTE_COMP_COMPRESS)
+ return 0;
+
+ return 1;
+}
+
+static uint8_t
+get_opcode(const struct zsda_comp_xform *xform)
+{
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_NONE ||
+ xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ return ZSDA_OPC_COMP_GZIP;
+ else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+ return ZSDA_OPC_COMP_ZLIB;
+ }
+ if (xform->type == RTE_COMP_DECOMPRESS) {
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_CRC32 ||
+ xform->checksum_type == RTE_COMP_CHECKSUM_NONE)
+ return ZSDA_OPC_DECOMP_GZIP;
+ else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+ return ZSDA_OPC_DECOMP_ZLIB;
+ }
+
+ return ZSDA_OPC_INVALID;
+}
+
+int
+zsda_build_comp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail)
+{
+ struct rte_comp_op *op = op_in;
+ struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+ struct zsda_wqe_comp *wqe =
+ (struct zsda_wqe_comp *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+ struct comp_head_info comp_head_info;
+
+ uint8_t opcode;
+ int ret = 0;
+ uint32_t op_offset;
+ uint32_t op_src_len;
+ uint32_t op_dst_len;
+ uint32_t head_len;
+
+ if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return -EINVAL;
+ }
+
+ opcode = get_opcode(xform);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return -EINVAL;
+ }
+
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ if (opcode == ZSDA_OPC_COMP_GZIP)
+ head_len = GZIP_HEADER_SIZE;
+ else if (opcode == ZSDA_OPC_COMP_ZLIB)
+ head_len = ZLIB_HEADER_SIZE;
+ else {
+ ZSDA_LOG(ERR, "Comp, op_code error!");
+ return -EINVAL;
+ }
+
+ comp_head_info.head_len = head_len;
+ comp_head_info.head_phys_addr = cookie->comp_head_phys_addr;
+
+ op_offset = op->src.offset;
+ op_src_len = op->src.length;
+ ret = zsda_fill_sgl(op->m_src, op_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_len, NULL);
+
+ op_offset = op->dst.offset;
+ op_dst_len = op->m_dst->pkt_len - op_offset;
+ op_dst_len += head_len;
+ ret |= zsda_fill_sgl(op->m_dst, op_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr, op_dst_len,
+ &comp_head_info);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ret;
+ }
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+ wqe->rx_length = op_src_len;
+ wqe->tx_length = op_dst_len;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ return ret;
+}
+
+int
+zsda_decomp_match(const void *op_in)
+{
+ const struct rte_comp_op *op = (const struct rte_comp_op *)op_in;
+ const struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ if (op->op_type != RTE_COMP_OP_STATELESS)
+ return 0;
+
+ if (xform->type != RTE_COMP_DECOMPRESS)
+ return 0;
+ return 1;
+}
+
+int
+zsda_build_decomp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail)
+{
+ struct rte_comp_op *op = op_in;
+ struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ struct zsda_wqe_comp *wqe =
+ (struct zsda_wqe_comp *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+ uint8_t opcode;
+ int ret = 0;
+
+ uint32_t op_offset;
+ uint32_t op_src_len;
+ uint32_t op_dst_len;
+
+ uint8_t *head_data;
+ uint16_t head_len;
+ struct comp_head_info comp_head_info;
+ uint8_t head_zlib[ZLIB_HEADER_SIZE] = {0x78, 0xDA};
+ uint8_t head_gzip[GZIP_HEADER_SIZE] = {0x1F, 0x8B, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03};
+
+ if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return -EINVAL;
+ }
+
+ opcode = get_opcode(xform);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return -EINVAL;
+ }
+
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ if (opcode == ZSDA_OPC_DECOMP_GZIP) {
+ head_data = head_gzip;
+ head_len = GZIP_HEADER_SIZE;
+ } else if (opcode == ZSDA_OPC_DECOMP_ZLIB) {
+ head_data = head_zlib;
+ head_len = ZLIB_HEADER_SIZE;
+ } else {
+ ZSDA_LOG(ERR, "Comp, op_code error!");
+ return -EINVAL;
+ }
+
+ op_offset = op->src.offset;
+ op_src_len = op->src.length;
+ op_src_len += head_len;
+ comp_head_info.head_len = head_len;
+ comp_head_info.head_phys_addr = cookie->comp_head_phys_addr;
+ cookie->decomp_no_tail = true;
+ for (int i = 0; i < head_len; i++)
+ cookie->comp_head[i] = head_data[i];
+
+ ret = zsda_fill_sgl(op->m_src, op_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_len,
+ &comp_head_info);
+
+ op_offset = op->dst.offset;
+ op_dst_len = op->m_dst->pkt_len - op_offset;
+ ret |= zsda_fill_sgl(op->m_dst, op_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr, op_dst_len, NULL);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ret;
+ }
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+
+ wqe->rx_length = op_src_len;
+ wqe->tx_length = op_dst_len;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ return ret;
+}
+
+void
+zsda_comp_callback(void *cookie_in, const struct zsda_cqe *cqe)
+{
+ struct zsda_op_cookie *tmp_cookie = (struct zsda_op_cookie *)cookie_in;
+ struct rte_comp_op *tmp_op = (struct rte_comp_op *)tmp_cookie->op;
+ uint8_t *data_addr =
+ (uint8_t *)tmp_op->m_dst->buf_addr + tmp_op->m_dst->data_off;
+ uint32_t chksum = 0;
+ uint16_t head_len;
+ uint16_t tail_len;
+
+ if (!(CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)))
+ tmp_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ else {
+ tmp_op->status = RTE_COMP_OP_STATUS_ERROR;
+ return;
+ }
+
+ /* handle chksum */
+ tmp_op->produced = cqe->tx_real_length;
+ if (cqe->op_code == ZSDA_OPC_COMP_ZLIB) {
+ head_len = ZLIB_HEADER_SIZE;
+ tail_len = ZLIB_TRAILER_SIZE;
+ chksum = zsda_read_chksum(data_addr, cqe->op_code,
+ tmp_op->produced - head_len);
+ }
+ if (cqe->op_code == ZSDA_OPC_COMP_GZIP) {
+ head_len = GZIP_HEADER_SIZE;
+ tail_len = GZIP_TRAILER_SIZE;
+ chksum = zsda_read_chksum(data_addr, cqe->op_code,
+ tmp_op->produced - head_len);
+ } else if (cqe->op_code == ZSDA_OPC_DECOMP_ZLIB) {
+ head_len = ZLIB_HEADER_SIZE;
+ tail_len = ZLIB_TRAILER_SIZE;
+ chksum = zsda_adler32(data_addr, tmp_op->produced);
+ } else if (cqe->op_code == ZSDA_OPC_DECOMP_GZIP) {
+ head_len = GZIP_HEADER_SIZE;
+ tail_len = GZIP_TRAILER_SIZE;
+ chksum = zsda_crc32(data_addr, tmp_op->produced);
+ }
+ tmp_op->output_chksum = chksum;
+
+ if (cqe->op_code == ZSDA_OPC_COMP_ZLIB ||
+ cqe->op_code == ZSDA_OPC_COMP_GZIP) {
+ /* remove the trailer data */
+ rte_pktmbuf_trim(tmp_op->m_dst, tail_len);
+ /* remove head and tail length */
+ tmp_op->produced = tmp_op->produced - (head_len + tail_len);
+ }
+
+}
+
+static uint32_t
+zsda_read_chksum(uint8_t *data_addr, uint8_t op_code, uint32_t produced)
+{
+ uint8_t *chk_addr;
+ uint32_t chksum = 0;
+ int i = 0;
+
+ if (op_code == ZSDA_OPC_COMP_ZLIB) {
+ chk_addr = data_addr + produced - ZLIB_TRAILER_SIZE;
+ for (i = 0; i < CHECKSUM_SIZE; i++) {
+ chksum = chksum << 8;
+ chksum |= (*(chk_addr + i));
+ }
+ } else if (op_code == ZSDA_OPC_COMP_GZIP) {
+ chk_addr = data_addr + produced - GZIP_TRAILER_SIZE;
+ for (i = 0; i < CHECKSUM_SIZE; i++)
+ chksum |= (*(chk_addr + i) << (i * 8));
+ }
+
+ return chksum;
+}
diff --git a/drivers/compress/zsda/zsda_comp.h b/drivers/compress/zsda/zsda_comp.h
new file mode 100644
index 0000000000..697d3e3564
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_H_
+#define _ZSDA_COMP_H_
+
+#include
+
+#include "zsda_common.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+struct zsda_comp_xform {
+ enum rte_comp_xform_type type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int zsda_comp_match(const void *op_in);
+int zsda_build_comp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail);
+int zsda_decomp_match(const void *op_in);
+int zsda_build_decomp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail);
+void zsda_comp_callback(void *cookie_in, const struct zsda_cqe *cqe);
+
+#endif
diff --git a/drivers/compress/zsda/zsda_comp_pmd.c b/drivers/compress/zsda/zsda_comp_pmd.c
new file mode 100644
index 0000000000..cd435927a6
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.c
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include
+
+#include "zsda_comp.h"
+#include "zsda_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities zsda_comp_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_DYNAMIC |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM,
+ .window_size = {.min = 15, .max = 15, .increment = 0},
+ },
+};
+
+static void
+zsda_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct zsda_common_stat comm = {0};
+
+ zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+ &comm);
+ stats->enqueued_count = comm.enqueued_count;
+ stats->dequeued_count = comm.dequeued_count;
+ stats->enqueue_err_count = comm.enqueue_err_count;
+ stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+static void
+zsda_comp_stats_reset(struct rte_compressdev *dev)
+{
+ zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+static int
+zsda_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ return zsda_queue_pair_release(
+ (struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+
+static int
+zsda_setup_comp_queue(struct zsda_pci_device *zsda_pci_dev, const uint16_t qp_id,
+ struct zsda_qp *qp, uint16_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_COMPRESSION;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "comp";
+
+ ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = zsda_comp_callback;
+ qp->srv[type].tx_cb = zsda_build_comp_request;
+ qp->srv[type].match = zsda_comp_match;
+
+ return ret;
+}
+
+static int
+zsda_setup_decomp_queue(struct zsda_pci_device *zsda_pci_dev, const uint16_t qp_id,
+ struct zsda_qp *qp, uint16_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_DECOMPRESSION;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "decomp";
+
+ ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = zsda_comp_callback;
+ qp->srv[type].tx_cb = zsda_build_decomp_request;
+ qp->srv[type].match = zsda_decomp_match;
+
+ return ret;
+}
+
+static int
+zsda_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ int ret = 0;
+ struct zsda_qp *qp_new;
+
+ struct zsda_qp **qp_addr =
+ (struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct zsda_comp_dev_private *comp_priv = dev->data->dev_private;
+ struct zsda_pci_device *zsda_pci_dev = comp_priv->zsda_pci_dev;
+ uint16_t num_qps_comp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+ uint16_t num_qps_decomp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+ uint16_t nb_des = max_inflight_ops & 0xffff;
+
+ nb_des = (nb_des == NB_DES) ? nb_des : NB_DES;
+
+ if (*qp_addr != NULL) {
+ ret = zsda_comp_qp_release(dev, qp_id);
+ if (ret)
+ return ret;
+ }
+
+ qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp_new == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ if (num_qps_comp == MAX_QPS_ON_FUNCTION)
+ ret = zsda_setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_decomp == MAX_QPS_ON_FUNCTION)
+ ret = zsda_setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else {
+ ret = zsda_setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= zsda_setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ }
+
+ if (ret) {
+ rte_free(qp_new);
+ return ret;
+ }
+
+ qp_new->mmap_bar_addr =
+ comp_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+ *qp_addr = qp_new;
+
+ return ret;
+}
+
+static int
+zsda_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct zsda_comp_xform), 8);
+}
+
+static struct rte_mempool *
+zsda_comp_create_xform_pool(struct zsda_comp_dev_private *comp_dev,
+ struct rte_compressdev_config *config,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE, "%s_xforms",
+ comp_dev->zsda_pci_dev->name);
+
+ ZSDA_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ ZSDA_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ ZSDA_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name, num_elements,
+ zsda_comp_xform_size(), 0, 0, NULL,
+ NULL, NULL, NULL, config->socket_id, 0);
+ if (mp == NULL) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return NULL;
+ }
+
+ return mp;
+}
+
+static int
+zsda_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct zsda_comp_dev_private *zsda = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -EINVAL;
+ }
+ if (unlikely(zsda->xformpool == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(zsda->xformpool, private_xform)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENOMEM;
+ }
+
+ struct zsda_comp_xform *zsda_xform =
+ (struct zsda_comp_xform *)*private_xform;
+ zsda_xform->type = xform->type;
+
+ if (zsda_xform->type == RTE_COMP_COMPRESS)
+ zsda_xform->checksum_type = xform->compress.chksum;
+ else
+ zsda_xform->checksum_type = xform->decompress.chksum;
+
+ if (zsda_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32_ADLER32)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+zsda_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct zsda_comp_xform *zsda_xform =
+ (struct zsda_comp_xform *)private_xform;
+
+ if (zsda_xform) {
+ memset(zsda_xform, 0, zsda_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(zsda_xform);
+
+ rte_mempool_put(mp, zsda_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+zsda_comp_dev_close(struct rte_compressdev *dev)
+{
+ uint16_t i;
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++)
+ zsda_comp_qp_release(dev, i);
+
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+
+ return 0;
+}
+
+static int
+zsda_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ if (config->max_nb_priv_xforms) {
+ comp_dev->xformpool = zsda_comp_create_xform_pool(
+ comp_dev, config, config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL)
+ return -ENOMEM;
+ } else
+ comp_dev->xformpool = NULL;
+
+ return 0;
+}
+
+static int
+zsda_comp_dev_start(struct rte_compressdev *dev)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ ret = zsda_queue_start(comp_dev->zsda_pci_dev->pci_dev);
+
+ if (ret)
+ ZSDA_LOG(ERR, E_START_Q);
+
+ return ret;
+}
+
+static void
+zsda_comp_dev_stop(struct rte_compressdev *dev)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ zsda_queue_stop(comp_dev->zsda_pci_dev->pci_dev);
+}
+
+static void
+zsda_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ zsda_comp_max_nb_qps(comp_dev->zsda_pci_dev);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->zsda_dev_capabilities;
+ }
+}
+
+static uint16_t
+zsda_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static uint16_t
+zsda_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static struct rte_compressdev_ops compress_zsda_ops = {
+
+ .dev_configure = zsda_comp_dev_config,
+ .dev_start = zsda_comp_dev_start,
+ .dev_stop = zsda_comp_dev_stop,
+ .dev_close = zsda_comp_dev_close,
+ .dev_infos_get = zsda_comp_dev_info_get,
+
+ .stats_get = zsda_comp_stats_get,
+ .stats_reset = zsda_comp_stats_reset,
+ .queue_pair_setup = zsda_comp_qp_setup,
+ .queue_pair_release = zsda_comp_qp_release,
+
+ .private_xform_create = zsda_comp_private_xform_create,
+ .private_xform_free = zsda_comp_private_xform_free};
+
+/* An rte_driver is needed to register the device with compressdev.
+ * The zsda PCI device's own rte_driver cannot be used, since its name
+ * represents the whole PCI device with all of its services. This is
+ * simply a holder for a name for the compression part of the PCI device.
+ */
+static const char zsda_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_ZSDA_PMD);
+static const struct rte_driver compdev_zsda_driver = {
+ .name = zsda_comp_drv_name, .alias = zsda_comp_drv_name};
+
+int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct zsda_device_info *dev_info =
+ &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = (int)rte_socket_id(),
+ };
+
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct zsda_comp_dev_private *comp_dev;
+ const struct rte_compressdev_capabilities *capabilities;
+ uint16_t capa_size = sizeof(struct rte_compressdev_capabilities);
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ zsda_pci_dev->name, "comp");
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dev_info->comp_rte_dev.driver = &compdev_zsda_driver;
+ dev_info->comp_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+ dev_info->comp_rte_dev.devargs = NULL;
+
+ compressdev = rte_compressdev_pmd_create(
+ name, &(dev_info->comp_rte_dev),
+ sizeof(struct zsda_comp_dev_private), &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_zsda_ops;
+
+ compressdev->enqueue_burst = zsda_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = zsda_comp_pmd_dequeue_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "ZSDA_COMP_CAPA");
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->zsda_pci_dev = zsda_pci_dev;
+ comp_dev->compressdev = compressdev;
+ capabilities = zsda_comp_capabilities;
+
+ comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (comp_dev->capa_mz == NULL) {
+ comp_dev->capa_mz = rte_memzone_reserve(
+ capa_memz_name, capa_size, rte_socket_id(), 0);
+ }
+ if (comp_dev->capa_mz == NULL) {
+ ZSDA_LOG(DEBUG, E_MALLOC);
+ memset(&dev_info->comp_rte_dev, 0,
+ sizeof(dev_info->comp_rte_dev));
+ rte_compressdev_pmd_destroy(compressdev);
+ return -EFAULT;
+ }
+
+ memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
+ comp_dev->zsda_dev_capabilities = comp_dev->capa_mz->addr;
+
+ zsda_pci_dev->comp_dev = comp_dev;
+
+ return 0;
+}
+
+int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct zsda_comp_dev_private *comp_dev;
+
+ if (zsda_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = zsda_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(zsda_pci_dev->comp_dev->capa_mz);
+
+ zsda_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ zsda_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/compress/zsda/zsda_comp_pmd.h b/drivers/compress/zsda/zsda_comp_pmd.h
new file mode 100644
index 0000000000..b496cb53a5
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_PMD_H_
+#define _ZSDA_COMP_PMD_H_
+
+#include
+
+/**< ZSDA Compression PMD driver name */
+#define COMPRESSDEV_NAME_ZSDA_PMD compress_zsda
+
+/** Private data structure for a ZSDA compression device.
+ * This ZSDA device offers only a compression service; there can be
+ * one of these on each zsda_pci_device (VF).
+ */
+struct zsda_comp_dev_private {
+ struct zsda_pci_device *zsda_pci_dev;
+ /**< The zsda pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *zsda_dev_capabilities;
+ /* ZSDA device compression capabilities */
+ const struct rte_memzone *interim_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for zsda_comp_xforms */
+ struct rte_mempool *streampool;
+ /**< The device's pool for zsda_comp_streams */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
+ uint16_t min_enq_burst_threshold;
+};
+
+int zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_COMP_PMD_H_ */
diff --git a/drivers/meson.build b/drivers/meson.build
index 66931d4241..cdbd3b1c17 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -17,6 +17,7 @@ subdirs = [
'common/nitrox', # depends on bus.
'common/qat', # depends on bus.
'common/sfc_efx', # depends on bus.
+ 'common/zsda', # depends on bus.
'mempool', # depends on common and bus.
'dma', # depends on common and bus.
'net', # depends on common, bus, mempool
From patchwork Wed Sep 11 07:54:27 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143943
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 27AD54595D;
Wed, 11 Sep 2024 09:56:11 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 35A924325C;
Wed, 11 Sep 2024 09:55:43 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40])
by mails.dpdk.org (Postfix) with ESMTP id 77AD243036
for ; Wed, 11 Sep 2024 09:55:38 +0200 (CEST)
Received: from mxct.zte.com.cn (unknown [192.168.251.13])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtZ3kJvz8RTZT
for ; Wed, 11 Sep 2024 15:55:34 +0800 (CST)
Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxct.zte.com.cn (FangMail) with ESMTPS id 4X3XtW6z1cz501bL
for ; Wed, 11 Sep 2024 15:55:31 +0800 (CST)
Received: from szxlzmapp02.zte.com.cn ([10.5.231.79])
by mse-fl2.zte.com.cn with SMTP id 48B7tHJ8069302
for ; Wed, 11 Sep 2024 15:55:17 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:19 +0800
X-Zmail-TransId: 3e8166e14ce7000-08eaa
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 5/8] zsda: modify files for introducing zsda cryptodev
Date: Wed, 11 Sep 2024 15:54:27 +0800
Message-ID: <20240911075447.4074486-4-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl2.zte.com.cn 48B7tHJ8069302
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF6.002/4X3XtZ3kJvz8RTZT
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Modify existing files as needed to hook in the newly
introduced ZSDA cryptodev support.
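As a reference for the rte_crypto_sym.h change in this patch, a minimal
sketch of the cipher transform an application might fill in to request the
new SM4-XTS mode; the variable name is an illustrative assumption, and the
key/IV sizes follow the SM4-XTS capability advertised by the ZSDA crypto
PMD later in this series:

#include <rte_crypto_sym.h>

/* Illustrative only: cipher xform selecting the SM4-XTS algorithm added
 * below. The key pointer is a placeholder and must reference real key
 * material before use.
 */
static struct rte_crypto_sym_xform sm4_xts_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .cipher = {
                .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                .algo = RTE_CRYPTO_CIPHER_SM4_XTS,
                .key = { .data = NULL, .length = 32 },
                .iv = { .offset = 0, .length = 16 },
        },
};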
Signed-off-by: Hanxiao Li
---
MAINTAINERS | 3 ++
drivers/common/zsda/zsda_common.h | 50 +++++++++++++++++++++++
drivers/common/zsda/zsda_device.c | 42 +++-----------------
drivers/common/zsda/zsda_device.h | 19 +++++++--
drivers/common/zsda/zsda_qp.c | 66 ++++++++++++++++++++++++++++++-
lib/cryptodev/rte_crypto_sym.h | 4 +-
6 files changed, 141 insertions(+), 43 deletions(-)
--
2.27.0
diff --git a/MAINTAINERS b/MAINTAINERS
index ea245fc61b..9e66c72c45 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1221,6 +1221,9 @@ F: drivers/crypto/virtio/
F: doc/guides/cryptodevs/virtio.rst
F: doc/guides/cryptodevs/features/virtio.ini
+ZTE Storage Data Accelerator
+M: Hanxiao Li
+F: drivers/crypto/zsda/
Compression Drivers
-------------------
diff --git a/drivers/common/zsda/zsda_common.h b/drivers/common/zsda/zsda_common.h
index 0dbc9b7d3c..d50a152307 100644
--- a/drivers/common/zsda/zsda_common.h
+++ b/drivers/common/zsda/zsda_common.h
@@ -97,17 +97,39 @@
enum zsda_service_type {
ZSDA_SERVICE_COMPRESSION = 0,
ZSDA_SERVICE_DECOMPRESSION,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT,
+ ZSDA_SERVICE_HASH_ENCODE = 6,
ZSDA_SERVICE_INVALID,
};
#define ZSDA_MAX_SERVICES (ZSDA_SERVICE_INVALID)
+#define ZSDA_OPC_EC_AES_XTS_256 0x0 /* Encry AES-XTS-256 */
+#define ZSDA_OPC_EC_AES_XTS_512 0x01 /* Encry AES-XTS-512 */
+#define ZSDA_OPC_EC_SM4_XTS_256 0x02 /* Encry SM4-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_256 0x08 /* Decry AES-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_512 0x09 /* Decry AES-XTS-512 */
+#define ZSDA_OPC_DC_SM4_XTS_256 0x0A /* Decry SM4-XTS-256 */
#define ZSDA_OPC_COMP_GZIP 0x10 /* Encomp deflate-Gzip */
#define ZSDA_OPC_COMP_ZLIB 0x11 /* Encomp deflate-Zlib */
#define ZSDA_OPC_DECOMP_GZIP 0x18 /* Decompinfalte-Gzip */
#define ZSDA_OPC_DECOMP_ZLIB 0x19 /* Decompinfalte-Zlib */
+#define ZSDA_OPC_HASH_SHA1 0x20 /* Hash-SHA1 */
+#define ZSDA_OPC_HASH_SHA2_224 0x21 /* Hash-SHA2-224 */
+#define ZSDA_OPC_HASH_SHA2_256 0x22 /* Hash-SHA2-256 */
+#define ZSDA_OPC_HASH_SHA2_384 0x23 /* Hash-SHA2-384 */
+#define ZSDA_OPC_HASH_SHA2_512 0x24 /* Hash-SHA2-512 */
+#define ZSDA_OPC_HASH_SM3 0x25 /* Hash-SM3 */
#define ZSDA_OPC_INVALID 0xff
+#define ZSDA_DIGEST_SIZE_SHA1 (20)
+#define ZSDA_DIGEST_SIZE_SHA2_224 (28)
+#define ZSDA_DIGEST_SIZE_SHA2_256 (32)
+#define ZSDA_DIGEST_SIZE_SHA2_384 (48)
+#define ZSDA_DIGEST_SIZE_SHA2_512 (64)
+#define ZSDA_DIGEST_SIZE_SM3 (32)
+
#define SET_CYCLE 0xff
#define SET_HEAD_INTI 0x0
@@ -237,9 +259,34 @@ struct zsda_op_cookie {
uint8_t comp_head[COMP_REMOVE_SPACE_LEN];
} __rte_packed;
+#define ZSDA_CIPHER_KEY_MAX_LEN 64
+struct crypto_cfg {
+ uint8_t slba_L[8];
+ uint8_t key[ZSDA_CIPHER_KEY_MAX_LEN];
+ uint8_t lbads : 4;
+ uint8_t resv1 : 4;
+ uint8_t resv2[7];
+ uint8_t slba_H[8];
+ uint8_t resv3[8];
+} __rte_packed;
+
struct compress_cfg {
} __rte_packed;
+struct zsda_wqe_crpt {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+ struct crypto_cfg cfg;
+} __rte_packed;
+
struct zsda_wqe_comp {
uint8_t valid;
uint8_t op_code;
@@ -281,6 +328,9 @@ struct zsda_common_stat {
enum zsda_algo_core {
ZSDA_CORE_COMP,
ZSDA_CORE_DECOMP,
+ ZSDA_CORE_ENCRY,
+ ZSDA_CORE_DECRY,
+ ZSDA_CORE_HASH,
ZSDA_CORE_INVALID,
};
diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
index de8894f5a3..4ddc97e564 100644
--- a/drivers/common/zsda/zsda_device.c
+++ b/drivers/common/zsda/zsda_device.c
@@ -7,6 +7,7 @@
#include
#include "zsda_device.h"
+#include "zsda_qp.h"
/* per-process array of device data */
struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
@@ -306,7 +307,8 @@ zsda_pci_device_release(const struct rte_pci_device *pci_dev)
inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (zsda_pci_dev->comp_dev != NULL) {
+ if ((zsda_pci_dev->sym_dev != NULL) ||
+ (zsda_pci_dev->comp_dev != NULL)) {
ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
return -EBUSY;
}
@@ -322,47 +324,12 @@ static int
zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
const struct rte_pci_device *pci_dev)
{
+ zsda_sym_dev_destroy(zsda_pci_dev);
zsda_comp_dev_destroy(zsda_pci_dev);
return zsda_pci_device_release(pci_dev);
}
-int
-zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
- const uint8_t qid, struct qinfo *qcfg)
-{
- struct zsda_admin_req_qcfg req = {0};
- struct zsda_admin_resp_qcfg resp = {0};
- int ret = 0;
- struct rte_pci_device *pci_dev =
- zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
-
- if (qid >= MAX_QPS_ON_FUNCTION) {
- ZSDA_LOG(ERR, "qid beyond limit!");
- return ZSDA_FAILED;
- }
-
- zsda_admin_msg_init(pci_dev);
- req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
- req.qid = qid;
-
- ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
- if (ret) {
- ZSDA_LOG(ERR, "Failed! Send msg");
- return ret;
- }
-
- ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
- if (ret) {
- ZSDA_LOG(ERR, "Failed! Receive msg");
- return ret;
- }
-
- memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
-
- return ZSDA_SUCCESS;
-}
-
static int
zsda_unmask_flr(const struct zsda_pci_device *zsda_pci_dev)
{
@@ -432,6 +399,7 @@ zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return ret;
}
+ ret |= zsda_sym_dev_create(zsda_pci_dev);
ret |= zsda_comp_dev_create(zsda_pci_dev);
if (ret) {
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
index 1b2ad0ce85..51ff741840 100644
--- a/drivers/common/zsda/zsda_device.h
+++ b/drivers/common/zsda/zsda_device.h
@@ -18,6 +18,12 @@ struct zsda_device_info {
struct rte_pci_device *pci_dev;
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
struct rte_device comp_rte_dev;
/**< This represents the compression subset of this pci device.
* Register with this rather than with the one in
@@ -27,6 +33,7 @@ struct zsda_device_info {
extern struct zsda_device_info zsda_devs[];
+struct zsda_sym_dev_private;
struct zsda_comp_dev_private;
struct zsda_qp_hw_data {
@@ -64,6 +71,10 @@ struct zsda_pci_device {
struct rte_pci_device *pci_dev;
+ /* Data relating to symmetric crypto service */
+ struct zsda_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+
/* Data relating to compression service */
struct zsda_comp_dev_private *comp_dev;
/**< link back to compressdev private data */
@@ -79,7 +90,10 @@ struct zsda_pci_device *
zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev);
__rte_weak int
-zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
__rte_weak int
zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
@@ -87,9 +101,6 @@ zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
__rte_weak int
zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
-int zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
- const uint8_t qid, struct qinfo *qcfg);
-
int zsda_queue_start(const struct rte_pci_device *pci_dev);
int zsda_queue_stop(const struct rte_pci_device *pci_dev);
int zsda_queue_clear(const struct rte_pci_device *pci_dev);
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
index f2dfe43b2e..5c5ac90771 100644
--- a/drivers/common/zsda/zsda_qp.c
+++ b/drivers/common/zsda/zsda_qp.c
@@ -20,8 +20,11 @@ struct ring_size {
};
struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+ [ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+ [ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
[ZSDA_SERVICE_COMPRESSION] = {32, 16},
[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+ [ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
};
static void
@@ -36,6 +39,43 @@ zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
SET_HEAD_INTI);
}
+static int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+ const uint8_t qid, struct qinfo *qcfg)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ if (qid >= MAX_QPS_ON_FUNCTION) {
+ ZSDA_LOG(ERR, "qid beyond limit!");
+ return ZSDA_FAILED;
+ }
+
+ zsda_admin_msg_init(pci_dev);
+ req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+ req.qid = qid;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ret;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ret;
+ }
+
+ memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+ return ZSDA_SUCCESS;
+}
+
+
int
zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
{
@@ -43,7 +83,7 @@ zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
uint32_t index;
enum zsda_service_type type;
struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
- struct qinfo qcfg;
+ struct qinfo qcfg = {0};
int ret = 0;
for (i = 0; i < zsda_num_used_qps; i++) {
@@ -115,6 +155,30 @@ zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
return min;
}
+uint16_t
+zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint16_t encrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ uint16_t decrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ uint16_t hash =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+ uint16_t min = 0;
+
+ if ((encrypt == MAX_QPS_ON_FUNCTION) ||
+ (decrypt == MAX_QPS_ON_FUNCTION) ||
+ (hash == MAX_QPS_ON_FUNCTION))
+ min = MAX_QPS_ON_FUNCTION;
+ else {
+ min = (encrypt < decrypt) ? encrypt : decrypt;
+ min = (min < hash) ? min : hash;
+ }
+
+ if (min == 0)
+ return MAX_QPS_ON_FUNCTION;
+ return min;
+}
void
zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 53b18b9412..b34d041fe0 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -176,8 +176,10 @@ enum rte_crypto_cipher_algorithm {
/**< ShangMi 4 (SM4) algorithm in CTR mode */
RTE_CRYPTO_CIPHER_SM4_OFB,
/**< ShangMi 4 (SM4) algorithm in OFB mode */
- RTE_CRYPTO_CIPHER_SM4_CFB
+ RTE_CRYPTO_CIPHER_SM4_CFB,
/**< ShangMi 4 (SM4) algorithm in CFB mode */
+ RTE_CRYPTO_CIPHER_SM4_XTS
+ /**< ShangMi 4 (SM4) algorithm in XTS mode */
};
/** Symmetric Cipher Direction */
From patchwork Wed Sep 11 07:54:28 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143939
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 69D1F4595D;
Wed, 11 Sep 2024 09:55:32 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 47F6E402D1;
Wed, 11 Sep 2024 09:55:32 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40])
by mails.dpdk.org (Postfix) with ESMTP id 1E3EB402CE
for ; Wed, 11 Sep 2024 09:55:29 +0200 (CEST)
Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtN5YFTz8RTZQ
for ; Wed, 11 Sep 2024 15:55:24 +0800 (CST)
Received: from szxlzmapp04.zte.com.cn ([10.5.231.166])
by mse-fl1.zte.com.cn with SMTP id 48B7tGjg072148
for ; Wed, 11 Sep 2024 15:55:16 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:19 +0800
X-Zmail-TransId: 3e8166e14ce7000-08eac
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 6/8] zsda: add zsda crypto-pmd
Date: Wed, 11 Sep 2024 15:54:28 +0800
Message-ID: <20240911075447.4074486-5-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl1.zte.com.cn 48B7tGjg072148
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CEC.000/4X3XtN5YFTz8RTZQ
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add new files zsda_sym_capabilities.h, zsda_sym_pmd.c and zsda_sym_pmd.h under drivers/crypto/zsda.
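A minimal sketch, assuming an already-probed ZSDA device, of the generic
cryptodev calls that end up in the dev_infos_get and queue_pair_setup
entry points added by this patch; the function name and the descriptor
count are illustrative, and the session pools are omitted:

#include <rte_cryptodev.h>

/* Illustrative only: query the device and configure one queue pair. The
 * PMD replaces the requested descriptor count with its own NB_DES.
 */
static int
zsda_example_sym_setup(uint8_t dev_id, int socket_id)
{
        struct rte_cryptodev_info info;
        struct rte_cryptodev_config conf = {
                .socket_id = socket_id,
                .nb_queue_pairs = 1,
        };
        struct rte_cryptodev_qp_conf qp_conf = {
                .nb_descriptors = 512,
                .mp_session = NULL,     /* session pool omitted in this sketch */
        };

        rte_cryptodev_info_get(dev_id, &info);
        if (info.max_nb_queue_pairs == 0)
                return -1;

        if (rte_cryptodev_configure(dev_id, &conf) < 0)
                return -1;

        return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, socket_id);
}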
Signed-off-by: Hanxiao Li
---
drivers/crypto/zsda/zsda_sym_capabilities.h | 112 +++++
drivers/crypto/zsda/zsda_sym_pmd.c | 429 ++++++++++++++++++++
drivers/crypto/zsda/zsda_sym_pmd.h | 35 ++
3 files changed, 576 insertions(+)
create mode 100644 drivers/crypto/zsda/zsda_sym_capabilities.h
create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.c
create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.h
--
2.27.0
diff --git a/drivers/crypto/zsda/zsda_sym_capabilities.h b/drivers/crypto/zsda/zsda_sym_capabilities.h
new file mode 100644
index 0000000000..dd387b36ad
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_capabilities.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_CAPABILITIES_H_
+#define _ZSDA_SYM_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities zsda_crypto_sym_capabilities[] = {
+ {/* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 20, .max = 20, .increment = 2},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 28, .max = 28, .increment = 0},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 32, .max = 32, .increment = 0},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 48, .max = 48, .increment = 0},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 64, .max = 64, .increment = 0},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* SM3 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ { .auth = {
+ .algo = RTE_CRYPTO_AUTH_SM3,
+ .block_size = 64,
+ .key_size = {.min = 0, .max = 0, .increment = 0},
+ .digest_size = {.min = 32, .max = 32, .increment = 0},
+ .iv_size = {0} },
+ } },
+ }
+ },
+ {/* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ { .cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {.min = 16, .max = 32, .increment = 16},
+ .iv_size = {.min = 16, .max = 16, .increment = 0} },
+ } },
+ }
+ },
+ {/* SM4 XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ { .cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SM4_XTS,
+ .block_size = 16,
+ .key_size = {.min = 32, .max = 32, .increment = 0},
+ .iv_size = {.min = 16, .max = 16, .increment = 0} },
+ } },
+ }
+ }
+};
+#endif /* _ZSDA_SYM_CAPABILITIES_H_ */
+
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.c b/drivers/crypto/zsda/zsda_sym_pmd.c
new file mode 100644
index 0000000000..ac5a63b96e
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include
+
+#include "cryptodev_pmd.h"
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+#include "zsda_sym_capabilities.h"
+
+uint8_t zsda_sym_driver_id;
+
+static int
+zsda_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return ZSDA_SUCCESS;
+}
+
+static int zsda_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int
+zsda_sym_dev_start(struct rte_cryptodev *dev)
+{
+ struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+ int ret = 0;
+
+ ret = zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev);
+
+ if (ret)
+ ZSDA_LOG(ERR, E_START_Q);
+ return ret;
+}
+
+static void
+zsda_sym_dev_stop(struct rte_cryptodev *dev)
+{
+ struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+
+ zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev);
+}
+
+static int
+zsda_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int ret = 0;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++)
+ ret |= zsda_sym_qp_release(dev, i);
+
+ return ret;
+}
+
+static void
+zsda_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ zsda_crypto_max_nb_qps(sym_priv->zsda_pci_dev);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = sym_priv->zsda_dev_capabilities;
+ info->driver_id = zsda_sym_driver_id;
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void
+zsda_sym_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats *stats)
+{
+ struct zsda_common_stat comm = {0};
+
+ zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+ &comm);
+ stats->enqueued_count = comm.enqueued_count;
+ stats->dequeued_count = comm.dequeued_count;
+ stats->enqueue_err_count = comm.enqueue_err_count;
+ stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+static void
+zsda_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+static int
+zsda_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ ZSDA_LOG(DEBUG, "Release sym qp %u on device %d", queue_pair_id,
+ dev->data->dev_id);
+
+ return zsda_queue_pair_release(
+ (struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+zsda_setup_encrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_ENCRYPT;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_encrypt";
+
+ ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = zsda_crypto_callback;
+ qp->srv[type].tx_cb = zsda_build_cipher_request;
+ qp->srv[type].match = zsda_encry_match;
+
+ return ret;
+}
+
+static int
+zsda_setup_decrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_DECRYPT;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_decrypt";
+
+ ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = zsda_crypto_callback;
+ qp->srv[type].tx_cb = zsda_build_cipher_request;
+ qp->srv[type].match = zsda_decry_match;
+
+ return ret;
+}
+
+static int
+zsda_setup_hash_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_HASH_ENCODE;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_hash";
+
+ ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = zsda_crypto_callback;
+ qp->srv[type].tx_cb = zsda_build_hash_request;
+ qp->srv[type].match = zsda_hash_match;
+
+ return ret;
+}
+
+static int
+zsda_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ int ret = 0;
+ struct zsda_qp *qp_new;
+
+ struct zsda_qp **qp_addr =
+ (struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+ struct zsda_pci_device *zsda_pci_dev = sym_priv->zsda_pci_dev;
+ uint16_t num_qps_encrypt = zsda_qps_per_service(
+ zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ uint16_t num_qps_decrypt = zsda_qps_per_service(
+ zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ uint16_t num_qps_hash = zsda_qps_per_service(
+ zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+
+ uint32_t nb_des = qp_conf->nb_descriptors;
+ nb_des = (nb_des == NB_DES) ? nb_des : NB_DES;
+
+ if (*qp_addr != NULL) {
+ ret = zsda_sym_qp_release(dev, qp_id);
+ if (ret)
+ return ret;
+ }
+
+ qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp_new == NULL) {
+ ZSDA_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+
+ if (num_qps_encrypt == MAX_QPS_ON_FUNCTION)
+ ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_decrypt == MAX_QPS_ON_FUNCTION)
+ ret = zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_hash == MAX_QPS_ON_FUNCTION)
+ ret = zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else {
+ ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ }
+
+ if (ret) {
+ rte_free(qp_new);
+ return ret;
+ }
+
+ qp_new->mmap_bar_addr =
+ sym_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+ *qp_addr = qp_new;
+
+ return ret;
+}
+
+static unsigned int
+zsda_sym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct zsda_sym_session), 8);
+}
+
+static int
+zsda_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess)
+{
+ void *sess_private_data;
+ int ret = 0;
+
+ if (unlikely(sess == NULL)) {
+ ZSDA_LOG(ERR, "Invalid session struct");
+ return -EINVAL;
+ }
+
+ sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
+
+ ret = zsda_crypto_set_session_parameters(
+ sess_private_data, xform);
+
+ if (ret != 0) {
+		ZSDA_LOG(ERR, "Failed to configure session parameters");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+zsda_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_sym_session *sess __rte_unused)
+{}
+
+static struct rte_cryptodev_ops crypto_zsda_ops = {
+
+ .dev_configure = zsda_sym_dev_config,
+ .dev_start = zsda_sym_dev_start,
+ .dev_stop = zsda_sym_dev_stop,
+ .dev_close = zsda_sym_dev_close,
+ .dev_infos_get = zsda_sym_dev_info_get,
+
+ .stats_get = zsda_sym_stats_get,
+ .stats_reset = zsda_sym_stats_reset,
+ .queue_pair_setup = zsda_sym_qp_setup,
+ .queue_pair_release = zsda_sym_qp_release,
+
+ .sym_session_get_size = zsda_sym_session_get_private_size,
+ .sym_session_configure = zsda_sym_session_configure,
+ .sym_session_clear = zsda_sym_session_clear,
+};
+
+static uint16_t
+zsda_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static uint16_t
+zsda_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static const char zsda_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_ZSDA_SYM_PMD);
+static const struct rte_driver cryptodev_zsda_sym_driver = {
+ .name = zsda_sym_drv_name, .alias = zsda_sym_drv_name};
+
+int
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev)
+{
+ int ret = 0;
+ struct zsda_device_info *dev_info =
+ &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = (int)rte_socket_id(),
+ .private_data_size = sizeof(struct zsda_sym_dev_private)};
+
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct zsda_sym_dev_private *sym_priv;
+ const struct rte_cryptodev_capabilities *capabilities;
+ uint64_t capa_size;
+
+ init_params.max_nb_queue_pairs = zsda_crypto_max_nb_qps(zsda_pci_dev);
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", zsda_pci_dev->name,
+ "sym_encrypt");
+ ZSDA_LOG(DEBUG, "Creating ZSDA SYM device %s", name);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return ZSDA_SUCCESS;
+
+ dev_info->sym_rte_dev.driver = &cryptodev_zsda_sym_driver;
+ dev_info->sym_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+ dev_info->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name, &(dev_info->sym_rte_dev),
+ &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ dev_info->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = zsda_sym_driver_id;
+
+ cryptodev->dev_ops = &crypto_zsda_ops;
+
+ cryptodev->enqueue_burst = zsda_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = zsda_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ sym_priv = cryptodev->data->dev_private;
+ sym_priv->zsda_pci_dev = zsda_pci_dev;
+ capabilities = zsda_crypto_sym_capabilities;
+ capa_size = sizeof(zsda_crypto_sym_capabilities);
+
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, "ZSDA_SYM_CAPA");
+
+ sym_priv->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (sym_priv->capa_mz == NULL)
+ sym_priv->capa_mz = rte_memzone_reserve(
+ capa_memz_name, capa_size, rte_socket_id(), 0);
+
+ if (sym_priv->capa_mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ memcpy(sym_priv->capa_mz->addr, capabilities, capa_size);
+ sym_priv->zsda_dev_capabilities = sym_priv->capa_mz->addr;
+
+ zsda_pci_dev->sym_dev = sym_priv;
+
+ return ZSDA_SUCCESS;
+
+error:
+
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&dev_info->sym_rte_dev, 0, sizeof(dev_info->sym_rte_dev));
+
+ return ret;
+}
+
+int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (zsda_pci_dev == NULL)
+ return -ENODEV;
+ if (zsda_pci_dev->sym_dev == NULL)
+ return ZSDA_SUCCESS;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(zsda_pci_dev->sym_dev->capa_mz);
+
+ cryptodev = rte_cryptodev_pmd_get_dev(zsda_pci_dev->zsda_dev_id);
+
+ rte_cryptodev_pmd_destroy(cryptodev);
+ zsda_devs[zsda_pci_dev->zsda_dev_id].sym_rte_dev.name = NULL;
+ zsda_pci_dev->sym_dev = NULL;
+
+ return ZSDA_SUCCESS;
+}
+
+static struct cryptodev_driver zsda_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zsda_crypto_drv, cryptodev_zsda_sym_driver,
+ zsda_sym_driver_id);
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.h b/drivers/crypto/zsda/zsda_sym_pmd.h
new file mode 100644
index 0000000000..77175fed47
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_PMD_H_
+#define _ZSDA_SYM_PMD_H_
+
+#include "zsda_device.h"
+
+/** ZSDA Symmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_ZSDA_SYM_PMD crypto_zsda
+
+extern uint8_t zsda_sym_driver_id;
+
+/** Private data structure for a ZSDA device.
+ * This ZSDA device offers only the symmetric crypto service;
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_sym_dev_private {
+ struct zsda_pci_device *zsda_pci_dev;
+ /**< The zsda pci device hosting the service */
+
+ const struct rte_cryptodev_capabilities *zsda_dev_capabilities;
+ /* ZSDA device symmetric crypto capabilities */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
+ uint16_t min_enq_burst_threshold;
+ uint32_t internal_capabilities; /* see flags ZSDA_SYM_CAP_xxx */
+};
+
+int zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_SYM_PMD_H_ */
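The PMD above plugs into the standard cryptodev data path: zsda_sym_qp_setup() is reached through rte_cryptodev_queue_pair_setup() and the registered burst functions through rte_cryptodev_enqueue_burst()/rte_cryptodev_dequeue_burst(). A minimal usage sketch follows; run_one_burst, dev_id and the prepared ops[] array are assumptions for illustration, not part of the patch.

#include <rte_cryptodev.h>
#include <rte_lcore.h>

/* Illustrative helper: configure one queue pair and push/pull a burst of ops. */
static int
run_one_burst(uint8_t dev_id, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_cryptodev_config conf = {
		.socket_id = (int)rte_socket_id(),
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 512,	/* the PMD forces its own NB_DES internally */
	};
	uint16_t sent, done = 0;

	if (rte_cryptodev_configure(dev_id, &conf) != 0 ||
	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id()) != 0 ||
	    rte_cryptodev_start(dev_id) != 0)
		return -1;

	sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, 0, ops + done, sent - done);

	return 0;
}
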
From patchwork Wed Sep 11 07:54:29 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143942
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 440D94595D;
Wed, 11 Sep 2024 09:56:01 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 5D76243251;
Wed, 11 Sep 2024 09:55:40 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.40])
by mails.dpdk.org (Postfix) with ESMTP id 13D8643248
for ; Wed, 11 Sep 2024 09:55:38 +0200 (CEST)
Received: from mxct.zte.com.cn (unknown [192.168.251.13])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtZ0jvvz8RTZM
for ; Wed, 11 Sep 2024 15:55:34 +0800 (CST)
Received: from mse-fl2.zte.com.cn (unknown [10.5.228.133])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxct.zte.com.cn (FangMail) with ESMTPS id 4X3XtX1KQ7z501bd
for ; Wed, 11 Sep 2024 15:55:32 +0800 (CST)
Received: from szxlzmapp04.zte.com.cn ([10.5.231.166])
by mse-fl2.zte.com.cn with SMTP id 48B7tH1r069303
for ; Wed, 11 Sep 2024 15:55:17 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:19 +0800
X-Zmail-TransId: 3e8166e14ce7000-08eae
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 7/8] zsda: add zsda crypto-sym
Date: Wed, 11 Sep 2024 15:54:29 +0800
Message-ID: <20240911075447.4074486-6-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl2.zte.com.cn 48B7tH1r069303
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF6.000/4X3XtZ0jvvz8RTZM
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add new files zsda_sym.c and zsda_sym.h in drivers/crypto/zsda
Signed-off-by: Hanxiao Li
---
drivers/crypto/zsda/zsda_sym.c | 286 +++++++++++++++++++++++++++++++++
drivers/crypto/zsda/zsda_sym.h | 25 +++
2 files changed, 311 insertions(+)
create mode 100644 drivers/crypto/zsda/zsda_sym.c
create mode 100644 drivers/crypto/zsda/zsda_sym.h
--
2.27.0
diff --git a/drivers/crypto/zsda/zsda_sym.c b/drivers/crypto/zsda/zsda_sym.c
new file mode 100644
index 0000000000..425cb36643
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+
+#define choose_dst_mbuf(mbuf_src, mbuf_dst) ((mbuf_dst) == NULL ? (mbuf_src) : (mbuf_dst))
+#define LBADS_MAX_REMAINDER (16 - 1)
+
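+/* Copy n bytes from src to dst with the byte order reversed. */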
+void
+zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i)
+ dst[n - 1 - i] = src[i];
+}
+
+static uint8_t
+zsda_get_opcode_hash(struct zsda_sym_session *sess)
+{
+ switch (sess->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1:
+ return ZSDA_OPC_HASH_SHA1;
+
+ case RTE_CRYPTO_AUTH_SHA224:
+ return ZSDA_OPC_HASH_SHA2_224;
+
+ case RTE_CRYPTO_AUTH_SHA256:
+ return ZSDA_OPC_HASH_SHA2_256;
+
+ case RTE_CRYPTO_AUTH_SHA384:
+ return ZSDA_OPC_HASH_SHA2_384;
+
+ case RTE_CRYPTO_AUTH_SHA512:
+ return ZSDA_OPC_HASH_SHA2_512;
+
+ case RTE_CRYPTO_AUTH_SM3:
+ return ZSDA_OPC_HASH_SM3;
+ default:
+ break;
+ }
+
+ return ZSDA_OPC_INVALID;
+}
+
+static uint8_t
+zsda_get_opcode_crypto(struct zsda_sym_session *sess)
+{
+
+ if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+ sess->cipher.key_encry.length == 16)
+ return ZSDA_OPC_EC_AES_XTS_256;
+ else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+ sess->cipher.key_encry.length == 32)
+ return ZSDA_OPC_EC_AES_XTS_512;
+ else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_SM4_XTS)
+ return ZSDA_OPC_EC_SM4_XTS_256;
+ } else if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+ sess->cipher.key_decry.length == 16)
+ return ZSDA_OPC_DC_AES_XTS_256;
+ else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+ sess->cipher.key_decry.length == 32)
+ return ZSDA_OPC_DC_AES_XTS_512;
+ else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_SM4_XTS)
+ return ZSDA_OPC_DC_SM4_XTS_256;
+ }
+ return ZSDA_OPC_INVALID;
+}
+
+int
+zsda_encry_match(const void *op_in)
+{
+ const struct rte_crypto_op *op = (const struct rte_crypto_op *)op_in;
+ struct rte_cryptodev_sym_session *session = op->sym->session;
+ struct zsda_sym_session *sess =
+ (struct zsda_sym_session *)session->driver_priv_data;
+
+ if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+ sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ return 1;
+ else
+ return 0;
+}
+
+int
+zsda_decry_match(const void *op_in)
+{
+ const struct rte_crypto_op *op = (const struct rte_crypto_op *)op_in;
+ struct rte_cryptodev_sym_session *session = op->sym->session;
+ struct zsda_sym_session *sess =
+ (struct zsda_sym_session *)session->driver_priv_data;
+
+ if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+ sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ return 1;
+ else
+ return 0;
+}
+
+int
+zsda_hash_match(const void *op_in)
+{
+ const struct rte_crypto_op *op = (const struct rte_crypto_op *)op_in;
+ struct rte_cryptodev_sym_session *session = op->sym->session;
+ struct zsda_sym_session *sess =
+ (struct zsda_sym_session *)session->driver_priv_data;
+
+ if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_AUTH)
+ return 1;
+ else
+ return 0;
+}
+
+static int
+zsda_check_len_lbads(uint32_t data_len, uint32_t lbads_size)
+{
+ if (data_len < 16) {
+ ZSDA_LOG(ERR, "data_len wrong!");
+ return ZSDA_FAILED;
+ }
+ if (lbads_size != 0) {
+ if (!(((data_len % lbads_size) == 0) ||
+ ((data_len % lbads_size) > LBADS_MAX_REMAINDER))) {
+ ZSDA_LOG(ERR, "data_len wrong!");
+ return ZSDA_FAILED;
+ }
+ }
+
+ return 0;
+}
+
+int
+zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ struct rte_cryptodev_sym_session *session =
+ (struct rte_cryptodev_sym_session *)op->sym->session;
+ struct zsda_sym_session *sess =
+ (struct zsda_sym_session *)session->driver_priv_data;
+
+ struct zsda_wqe_crpt *wqe =
+ (struct zsda_wqe_crpt *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+ struct rte_mbuf *mbuf;
+
+ int ret = 0;
+ uint32_t op_offset;
+ uint32_t op_src_len;
+ uint32_t op_dst_len;
+ const uint8_t *iv_addr = NULL;
+ uint8_t iv_len = 0;
+
+ ret = zsda_check_len_lbads(op->sym->cipher.data.length,
+ sess->cipher.dataunit_len);
+ if (ret)
+ return ZSDA_FAILED;
+
+ op_offset = op->sym->cipher.data.offset;
+ op_src_len = op->sym->cipher.data.length;
+ mbuf = op->sym->m_src;
+ ret = zsda_fill_sgl(mbuf, op_offset, sgl_src, cookie->sgl_src_phys_addr,
+ op_src_len, NULL);
+
+ mbuf = choose_dst_mbuf(op->sym->m_src, op->sym->m_dst);
+ op_dst_len = mbuf->pkt_len - op_offset;
+ ret |= zsda_fill_sgl(mbuf, op_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr, op_dst_len, NULL);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ret;
+ }
+
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+ wqe->rx_length = op_src_len;
+ wqe->tx_length = op_dst_len;
+ wqe->valid = queue->valid;
+ wqe->op_code = zsda_get_opcode_crypto(sess);
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+ wqe->cfg.lbads = sess->cipher.dataunit_len;
+
+ if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_encry.data,
+ ZSDA_CIPHER_KEY_MAX_LEN);
+ else
+ memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_decry.data,
+ ZSDA_CIPHER_KEY_MAX_LEN);
+
+ iv_addr = (const uint8_t *)rte_crypto_op_ctod_offset(
+ op, char *, sess->cipher.iv.offset);
+ iv_len = sess->cipher.iv.length;
+ zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_H, iv_addr, iv_len / 2);
+ zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_L, iv_addr + 8, iv_len / 2);
+
+ return ret;
+}
+
+int
+zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ struct rte_cryptodev_sym_session *session =
+ (struct rte_cryptodev_sym_session *)op->sym->session;
+ struct zsda_sym_session *sess =
+ (struct zsda_sym_session *)session->driver_priv_data;
+
+ struct zsda_wqe_crpt *wqe =
+ (struct zsda_wqe_crpt *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ uint8_t opcode;
+ uint32_t op_offset;
+ uint32_t op_src_len;
+ int ret = 0;
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+ wqe->rx_length = op->sym->auth.data.length;
+ wqe->tx_length = sess->auth.digest_length;
+
+ opcode = zsda_get_opcode_hash(sess);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ZSDA_FAILED;
+ }
+
+ op_offset = op->sym->auth.data.offset;
+ op_src_len = op->sym->auth.data.length;
+ ret = zsda_fill_sgl(op->sym->m_src, op_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_len, NULL);
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ret;
+ }
+
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_PHYS_ADDR;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = op->sym->auth.digest.phys_addr;
+
+ return ret;
+}
+
+void
+zsda_crypto_callback(void *cookie_in, const struct zsda_cqe *cqe)
+{
+ struct zsda_op_cookie *tmp_cookie = (struct zsda_op_cookie *)cookie_in;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)tmp_cookie->op;
+
+ if (!(CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)))
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+}
diff --git a/drivers/crypto/zsda/zsda_sym.h b/drivers/crypto/zsda/zsda_sym.h
new file mode 100644
index 0000000000..c822fd38f7
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_H_
+#define _ZSDA_SYM_H_
+
+#include "zsda_common.h"
+#include "zsda_qp.h"
+
+int zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail);
+
+int zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, const uint16_t new_tail);
+
+int zsda_encry_match(const void *op_in);
+int zsda_decry_match(const void *op_in);
+int zsda_hash_match(const void *op_in);
+
+void zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n);
+
+void zsda_crypto_callback(void *cookie_in, const struct zsda_cqe *cqe);
+
+#endif /* _ZSDA_SYM_H_ */
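zsda_build_cipher_request() above consumes a standard rte_crypto_op that carries a symmetric session. The sketch below shows, under stated assumptions, how such an op could be prepared on the application side; prepare_cipher_op, op_pool, m_src, sess and len are all illustrative names, and the session is treated as an opaque pointer.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative helper: build a session-based cipher op the PMD can translate into a WQE. */
static struct rte_crypto_op *
prepare_cipher_op(struct rte_mempool *op_pool, struct rte_mbuf *m_src,
		  void *sess, uint32_t len)
{
	struct rte_crypto_op *op;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return NULL;

	op->sym->m_src = m_src;
	op->sym->m_dst = NULL;			/* in-place: the driver falls back to m_src */
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = len;	/* must satisfy the LBADS length check above */

	/* The driver reads the IV from the op at the xform's iv.offset. */
	rte_crypto_op_attach_sym_session(op, sess);
	return op;
}
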
From patchwork Wed Sep 11 07:54:30 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-Patchwork-Submitter: Hanxiao Li
X-Patchwork-Id: 143941
X-Patchwork-Delegate: gakhil@marvell.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
by inbox.dpdk.org (Postfix) with ESMTP id 8DDB54595D;
Wed, 11 Sep 2024 09:55:50 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
by mails.dpdk.org (Postfix) with ESMTP id 3E28742FE5;
Wed, 11 Sep 2024 09:55:37 +0200 (CEST)
Received: from mxhk.zte.com.cn (mxhk.zte.com.cn [63.216.63.35])
by mails.dpdk.org (Postfix) with ESMTP id F1ADE42FE7
for ; Wed, 11 Sep 2024 09:55:33 +0200 (CEST)
Received: from mse-fl1.zte.com.cn (unknown [10.5.228.132])
(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
(No client certificate requested)
by mxhk.zte.com.cn (FangMail) with ESMTPS id 4X3XtY0B3tz5B1Fj
for ; Wed, 11 Sep 2024 15:55:33 +0800 (CST)
Received: from szxlzmapp06.zte.com.cn ([10.5.230.252])
by mse-fl1.zte.com.cn with SMTP id 48B7tHcL072149
for ; Wed, 11 Sep 2024 15:55:17 +0800 (+08)
(envelope-from li.hanxiao@zte.com.cn)
Received: from localhost.localdomain (unknown [192.168.6.15])
by smtp (Zmail) with SMTP; Wed, 11 Sep 2024 15:55:20 +0800
X-Zmail-TransId: 3e8166e14ce7000-08eb0
From: Hanxiao Li
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li
Subject: [PATCH v6 8/8] zsda: add zsda crypto-session and compile file
Date: Wed, 11 Sep 2024 15:54:30 +0800
Message-ID: <20240911075447.4074486-7-li.hanxiao@zte.com.cn>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240911075447.4074486-1-li.hanxiao@zte.com.cn>
References: <20240911075200.4074366-1-li.hanxiao@zte.com.cn>
<20240911075447.4074486-1-li.hanxiao@zte.com.cn>
MIME-Version: 1.0
X-MAIL: mse-fl1.zte.com.cn 48B7tHcL072149
X-Fangmail-Anti-Spam-Filtered: true
X-Fangmail-MID-QID: 66E14CF5.000/4X3XtY0B3tz5B1Fj
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Add new files zsda_sym_session.c and zsda_sym_session.h,
and modify drivers/common/zsda/meson.build
Signed-off-by: Hanxiao Li
---
drivers/common/zsda/meson.build | 16 +-
drivers/crypto/zsda/zsda_sym_session.c | 503 +++++++++++++++++++++++++
drivers/crypto/zsda/zsda_sym_session.h | 82 ++++
3 files changed, 599 insertions(+), 2 deletions(-)
create mode 100644 drivers/crypto/zsda/zsda_sym_session.c
create mode 100644 drivers/crypto/zsda/zsda_sym_session.h
--
2.27.0
diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index e2a214bbe8..ad205853f1 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -3,7 +3,7 @@
config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
-deps += ['bus_pci', 'compressdev']
+deps += ['bus_pci', 'cryptodev', 'compressdev']
sources += files(
'zsda_common.c',
'zsda_logs.c',
@@ -15,7 +15,6 @@ zsda_compress = true
zsda_compress_path = 'compress/zsda'
zsda_compress_relpath = '../../' + zsda_compress_path
includes += include_directories(zsda_compress_relpath)
-
if zsda_compress
zlib = dependency('zlib', required: false, method: 'pkg-config')
foreach f: ['zsda_comp_pmd.c', 'zsda_comp.c']
@@ -23,3 +22,16 @@ if zsda_compress
endforeach
ext_deps += zlib
endif
+
+zsda_crypto = true
+zsda_crypto_path = 'crypto/zsda'
+zsda_crypto_relpath = '../../' + zsda_crypto_path
+if zsda_crypto
+ libcrypto = dependency('libcrypto', required: false, method: 'pkg-config')
+ foreach f: ['zsda_sym_pmd.c', 'zsda_sym_session.c', 'zsda_sym.c']
+ sources += files(join_paths(zsda_crypto_relpath, f))
+ endforeach
+ deps += ['security']
+ ext_deps += libcrypto
+ cflags += ['-DBUILD_ZSDA_SYM']
+endif
diff --git a/drivers/crypto/zsda/zsda_sym_session.c b/drivers/crypto/zsda/zsda_sym_session.c
new file mode 100644
index 0000000000..dbeb569985
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.c
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_sym_session.h"
+#include "zsda_logs.h"
+
+/**************** AES KEY EXPANSION ****************/
+/**
+ * AES S-boxes
+ * S-box table: maps an 8-bit input to an 8-bit output
+ **/
+static const unsigned char aes_sbox[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 A B
+ * C D E F
+ */
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+ 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+ 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+ 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+ 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+ 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+ 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+ 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+ 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+ 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+ 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+ 0xb0, 0x54, 0xbb, 0x16};
+
+/**
+ * The round constant word array, Rcon[i]
+ *
+ * From Wikipedia's article on the Rijndael key schedule @
+ * https://en.wikipedia.org/wiki/Rijndael_key_schedule#Rcon "Only the first some
+ * of these constants are actually used – up to rcon[10] for AES-128 (as 11
+ * round keys are needed), up to rcon[8] for AES-192, up to rcon[7] for AES-256.
+ * rcon[0] is not used in AES algorithm."
+ */
+static const unsigned char Rcon[11] = {0x8d, 0x01, 0x02, 0x04, 0x08, 0x10,
+ 0x20, 0x40, 0x80, 0x1b, 0x36};
+
+#define GET_AES_SBOX_VAL(num) (aes_sbox[(num)])
+
+/**************** SM4 KEY EXPANSION ****************/
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef GET_ULONG_BE
+#define GET_ULONG_BE(n, b, i) \
+ { \
+ (n) = ((unsigned int)(b)[(i)] << 24) | \
+ ((unsigned int)(b)[(i) + 1] << 16) | \
+ ((unsigned int)(b)[(i) + 2] << 8) | \
+ ((unsigned int)(b)[(i) + 3]); \
+ }
+#endif
+
+#ifndef PUT_ULONG_BE
+#define PUT_ULONG_BE(n, b, i) \
+ { \
+ (b)[(i)] = (unsigned char)((n) >> 24); \
+ (b)[(i) + 1] = (unsigned char)((n) >> 16); \
+ (b)[(i) + 2] = (unsigned char)((n) >> 8); \
+ (b)[(i) + 3] = (unsigned char)((n)); \
+ }
+#endif
+
+/**
+ * Rotate-shift-left macro definitions
+ *
+ **/
+#define SHL(x, n) (((x)&0xFFFFFFFF) << n)
+#define ROTL(x, n) (SHL((x), n) | ((x) >> (32 - n)))
+
+/**
+ * SM4 S-boxes
+ * S-box table: maps an 8-bit input to an 8-bit output
+ **/
+static unsigned char sm4_sbox[16][16] = {
+ {0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2,
+ 0x28, 0xfb, 0x2c, 0x05},
+ {0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26,
+ 0x49, 0x86, 0x06, 0x99},
+ {0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43,
+ 0xed, 0xcf, 0xac, 0x62},
+ {0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa,
+ 0x75, 0x8f, 0x3f, 0xa6},
+ {0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19,
+ 0xe6, 0x85, 0x4f, 0xa8},
+ {0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b,
+ 0x70, 0x56, 0x9d, 0x35},
+ {0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b,
+ 0x01, 0x21, 0x78, 0x87},
+ {0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7,
+ 0xa0, 0xc4, 0xc8, 0x9e},
+ {0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce,
+ 0xf9, 0x61, 0x15, 0xa1},
+ {0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30,
+ 0xf5, 0x8c, 0xb1, 0xe3},
+ {0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab,
+ 0x0d, 0x53, 0x4e, 0x6f},
+ {0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72,
+ 0x6d, 0x6c, 0x5b, 0x51},
+ {0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41,
+ 0x1f, 0x10, 0x5a, 0xd8},
+ {0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12,
+ 0xb8, 0xe5, 0xb4, 0xb0},
+ {0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09,
+ 0xc5, 0x6e, 0xc6, 0x84},
+ {0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e,
+ 0xd7, 0xcb, 0x39, 0x48},
+};
+
+/* System parameter */
+static const unsigned int FK[4] = {0xa3b1bac6, 0x56aa3350, 0x677d9197,
+ 0xb27022dc};
+
+/* fixed parameter */
+static const unsigned int CK[32] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
+ 0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
+ 0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
+ 0x484f565d, 0x646b7279};
+
+/*
+ * private function:
+ * look up in SM4 S-boxes and get the related value.
+ * args: [in] inch: 0x00~0xFF (8 bits unsigned value).
+ */
+static unsigned char
+sm4Sbox(unsigned char inch)
+{
+ unsigned char *pTable = (unsigned char *)sm4_sbox;
+ unsigned char retVal = (unsigned char)(pTable[inch]);
+ return retVal;
+}
+
+/* private function:
+ * Calculating round encryption key.
+ * args: [in] ka: ka is a 32 bits unsigned value;
+ * return: sk[i]: i{0,1,2,3,...31}.
+ */
+static unsigned int
+sm4CalciRK(unsigned int ka)
+{
+ unsigned int bb = 0;
+ unsigned int rk = 0;
+ unsigned char a[4];
+ unsigned char b[4];
+
+ PUT_ULONG_BE(ka, a, 0)
+ b[0] = sm4Sbox(a[0]);
+ b[1] = sm4Sbox(a[1]);
+ b[2] = sm4Sbox(a[2]);
+ b[3] = sm4Sbox(a[3]);
+ GET_ULONG_BE(bb, b, 0)
+ rk = bb ^ (ROTL(bb, 13)) ^ (ROTL(bb, 23));
+ return rk;
+}
+
+static void
+zsda_sm4_key_expansion(unsigned int SK[32], const uint8_t key[16])
+{
+ unsigned int MK[4];
+ unsigned int k[36];
+ unsigned int i = 0;
+
+ GET_ULONG_BE(MK[0], key, 0);
+ GET_ULONG_BE(MK[1], key, 4);
+ GET_ULONG_BE(MK[2], key, 8);
+ GET_ULONG_BE(MK[3], key, 12);
+ k[0] = MK[0] ^ FK[0];
+ k[1] = MK[1] ^ FK[1];
+ k[2] = MK[2] ^ FK[2];
+ k[3] = MK[3] ^ FK[3];
+ for (; i < 32; i++) {
+ k[i + 4] = k[i] ^
+ (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
+ SK[i] = k[i + 4];
+ }
+}
+
+static void
+u32_to_u8(uint32_t *u_int32_t_data, uint8_t *u8_data)
+{
+ *(u8_data + 0) = ((*u_int32_t_data & 0xFF000000) >> 24) & (0xFF);
+ *(u8_data + 1) = ((*u_int32_t_data & 0x00FF0000) >> 16) & (0xFF);
+ *(u8_data + 2) = ((*u_int32_t_data & 0x0000FF00) >> 8) & (0xFF);
+ *(u8_data + 3) = (*u_int32_t_data & 0x000000FF);
+}
+
+static void
+zsda_aes_key_expansion(uint8_t *round_key, uint32_t round_num,
+ const uint8_t *key, uint32_t key_len)
+{
+ uint32_t i, j, k, nk, nr;
+ uint8_t tempa[4];
+
+ nk = key_len >> 2;
+ nr = round_num;
+
+ /* The first round key is the key itself. */
+ for (i = 0; i < nk; ++i) {
+ round_key[(i * 4) + 0] = key[(i * 4) + 0];
+
+ round_key[(i * 4) + 1] = key[(i * 4) + 1];
+
+ round_key[(i * 4) + 2] = key[(i * 4) + 2];
+ round_key[(i * 4) + 3] = key[(i * 4) + 3];
+ }
+
+ /* All other round keys are found from the previous round keys. */
+ for (i = nk; i < (4 * (nr + 1)); ++i) {
+ k = (i - 1) * 4;
+ tempa[0] = round_key[k + 0];
+ tempa[1] = round_key[k + 1];
+ tempa[2] = round_key[k + 2];
+ tempa[3] = round_key[k + 3];
+
+ if ((nk != 0) && ((i % nk) == 0)) {
+ /* This function shifts the 4 bytes in a word to the
+ * left once. [a0,a1,a2,a3] becomes [a1,a2,a3,a0]
+ * Function RotWord()
+ */
+ {
+ const u_int8_t u8tmp = tempa[0];
+
+ tempa[0] = tempa[1];
+ tempa[1] = tempa[2];
+ tempa[2] = tempa[3];
+ tempa[3] = u8tmp;
+ }
+
+ /* SubWord() is a function that takes a four-byte input
+ * word and applies the S-box to each of the four bytes
+ * to produce an output word. Function Subword()
+ */
+ {
+ tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+ tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+ tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+ tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+ }
+
+ tempa[0] = tempa[0] ^ Rcon[i / nk];
+ }
+
+ if (nk == 8) {
+ if ((i % nk) == 4) {
+ /* Function Subword() */
+ {
+ tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+ tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+ tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+ tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+ }
+ }
+ }
+
+ j = i * 4;
+ k = (i - nk) * 4;
+ round_key[j + 0] = round_key[k + 0] ^ tempa[0];
+ round_key[j + 1] = round_key[k + 1] ^ tempa[1];
+ round_key[j + 2] = round_key[k + 2] ^ tempa[2];
+ round_key[j + 3] = round_key[k + 3] ^ tempa[3];
+ }
+}
+
+static void
+zsda_decry_set_key(uint8_t key[64], const uint8_t *key1_ptr, uint8_t skey_len,
+ enum rte_crypto_cipher_algorithm algo)
+{
+ uint8_t round_num;
+ uint8_t dec_key1[ZSDA_AES_MAX_KEY_BYTE_LEN] = {0};
+ uint8_t aes_round_key[ZSDA_AES_MAX_EXP_BYTE_SIZE] = {0};
+ uint32_t sm4_round_key[ZSDA_SM4_MAX_EXP_DWORD_SIZE] = {0};
+
+ switch (algo) {
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ round_num = (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN)
+ ? ZSDA_AES256_ROUND_NUM
+ : ZSDA_AES512_ROUND_NUM;
+ zsda_aes_key_expansion(aes_round_key, round_num, key1_ptr,
+ skey_len);
+ rte_memcpy(dec_key1,
+ ((uint8_t *)aes_round_key + (16 * round_num)), 16);
+
+ if (skey_len == ZSDA_SYM_XTS_512_SKEY_LEN &&
+ (16 * round_num) <= ZSDA_AES_MAX_EXP_BYTE_SIZE) {
+ for (int i = 0; i < 16; i++) {
+ dec_key1[i + 16] =
+ aes_round_key[(16 * (round_num - 1)) + i];
+ }
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_SM4_XTS:
+ zsda_sm4_key_expansion(sm4_round_key, key1_ptr);
+ for (size_t i = 0; i < 4; i++)
+ u32_to_u8((uint32_t *)sm4_round_key +
+ ZSDA_SM4_MAX_EXP_DWORD_SIZE - 1 - i,
+ dec_key1 + (4 * i));
+ break;
+ default:
+ ZSDA_LOG(ERR, "unknown cipher algo!");
+ return;
+ }
+
+ if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+ zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY2_OFF,
+ key1_ptr + skey_len, skey_len);
+ zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY1_OFF,
+ dec_key1, skey_len);
+ } else {
+ zsda_reverse_memcpy(key, key1_ptr + skey_len, skey_len);
+ zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_512_KEY1_OFF,
+ dec_key1, skey_len);
+ }
+}
+
+static uint8_t
+zsda_sym_lbads(uint32_t dataunit_len)
+{
+ uint8_t lbads;
+
+ switch (dataunit_len) {
+ case ZSDA_AES_LBADS_512:
+ lbads = ZSDA_AES_LBADS_INDICATE_512;
+ break;
+ case ZSDA_AES_LBADS_4096:
+ lbads = ZSDA_AES_LBADS_INDICATE_4096;
+ break;
+ case ZSDA_AES_LBADS_8192:
+ lbads = ZSDA_AES_LBADS_INDICATE_8192;
+ break;
+ case ZSDA_AES_LBADS_0:
+ lbads = ZSDA_AES_LBADS_INDICATE_0;
+ break;
+ default:
+ ZSDA_LOG(ERR, "dataunit_len should be 0/512/4096/8192 - %d.",
+ dataunit_len);
+ lbads = ZSDA_AES_LBADS_INDICATE_INVALID;
+ break;
+ }
+ return lbads;
+}
+
+static int
+zsda_set_session_cipher(struct zsda_sym_session *sess,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ uint8_t skey_len = 0;
+ const uint8_t *key1_ptr = NULL;
+
+ if (cipher_xform->key.length > ZSDA_CIPHER_KEY_MAX_LEN) {
+ ZSDA_LOG(ERR, "key length not supported");
+ return -EINVAL;
+ }
+
+ sess->chain_order = ZSDA_SYM_CHAIN_ONLY_CIPHER;
+ sess->cipher.iv.offset = cipher_xform->iv.offset;
+ sess->cipher.iv.length = cipher_xform->iv.length;
+ sess->cipher.op = cipher_xform->op;
+ sess->cipher.algo = cipher_xform->algo;
+ sess->cipher.dataunit_len = cipher_xform->dataunit_len;
+ sess->cipher.lbads = zsda_sym_lbads(cipher_xform->dataunit_len);
+	if (sess->cipher.lbads == ZSDA_AES_LBADS_INDICATE_INVALID) {
+ ZSDA_LOG(ERR, "dataunit_len wrong!");
+ return -EINVAL;
+ }
+
+ skey_len = (cipher_xform->key.length / 2) & 0xff;
+
+ /* key set */
+ if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ sess->cipher.key_encry.length = cipher_xform->key.length;
+ if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+ zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data +
+ ZSDA_SYM_XTS_256_KEY2_OFF,
+ (cipher_xform->key.data + skey_len),
+ skey_len);
+ zsda_reverse_memcpy(((uint8_t *)sess->cipher.key_encry.data +
+ ZSDA_SYM_XTS_256_KEY1_OFF),
+ cipher_xform->key.data, skey_len);
+ } else
+ zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data,
+ cipher_xform->key.data,
+ cipher_xform->key.length);
+ } else if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ sess->cipher.key_decry.length = cipher_xform->key.length;
+ key1_ptr = cipher_xform->key.data;
+ zsda_decry_set_key(sess->cipher.key_decry.data, key1_ptr, skey_len,
+ sess->cipher.algo);
+ }
+
+ return 0;
+}
+
+static void
+zsda_set_session_auth(struct zsda_sym_session *sess,
+ struct rte_crypto_auth_xform *xform)
+{
+ sess->auth.op = xform->op;
+ sess->auth.algo = xform->algo;
+ sess->auth.digest_length = xform->digest_length;
+ sess->chain_order = ZSDA_SYM_CHAIN_ONLY_AUTH;
+}
+
+static struct rte_crypto_auth_xform *
+zsda_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+zsda_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Determine the chain order from a crypto xform chain */
+static enum zsda_sym_chain_order
+zsda_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum zsda_sym_chain_order res = ZSDA_SYM_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = ZSDA_SYM_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = ZSDA_SYM_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = ZSDA_SYM_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = ZSDA_SYM_CHAIN_CIPHER_AUTH;
+ }
+ }
+
+ return res;
+}
+
+/* Set session parameters from the crypto xform chain */
+int
+zsda_crypto_set_session_parameters(void *sess_priv,
+ struct rte_crypto_sym_xform *xform)
+{
+
+ struct zsda_sym_session *sess = (struct zsda_sym_session *) sess_priv;
+ struct rte_crypto_cipher_xform *cipher_xform =
+ zsda_get_cipher_xform(xform);
+ struct rte_crypto_auth_xform *auth_xform =
+ zsda_get_auth_xform(xform);
+
+ int ret = 0;
+
+ sess->chain_order = zsda_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case ZSDA_SYM_CHAIN_ONLY_CIPHER:
+ zsda_set_session_cipher(sess, cipher_xform);
+ break;
+ case ZSDA_SYM_CHAIN_ONLY_AUTH:
+ zsda_set_session_auth(sess, auth_xform);
+ break;
+
+ default:
+ ZSDA_LOG(ERR, "Invalid chain order");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/crypto/zsda/zsda_sym_session.h b/drivers/crypto/zsda/zsda_sym_session.h
new file mode 100644
index 0000000000..1797e46cb3
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_SESSION_H_
+#define _ZSDA_SYM_SESSION_H_
+
+#include "zsda_sym.h"
+
+#define ZSDA_SYM_XTS_IV_SLBA_OFF (8)
+#define ZSDA_SYM_XTS_256_SKEY_LEN (16)
+#define ZSDA_SYM_XTS_512_SKEY_LEN (32)
+#define ZSDA_SYM_XTS_256_KEY2_OFF (16)
+#define ZSDA_SYM_XTS_256_KEY1_OFF (48)
+#define ZSDA_SYM_XTS_512_KEY1_OFF (32)
+#define ZSDA_SYM_MIN_SRC_LEN_HASH (16)
+
+#define ZSDA_AES256_ROUND_NUM (10)
+#define ZSDA_AES512_ROUND_NUM (14)
+#define ZSDA_AES_MAX_EXP_BYTE_SIZE (240)
+#define ZSDA_AES_MAX_KEY_BYTE_LEN (32)
+#define ZSDA_SM4_MAX_EXP_DWORD_SIZE (32)
+
+#define ZSDA_AES_LBADS_0 (0)
+#define ZSDA_AES_LBADS_512 (512)
+#define ZSDA_AES_LBADS_4096 (4096)
+#define ZSDA_AES_LBADS_8192 (8192)
+
+#define ZSDA_AES_LBADS_INDICATE_0 (0x0)
+#define ZSDA_AES_LBADS_INDICATE_512 (0x9)
+#define ZSDA_AES_LBADS_INDICATE_4096 (0xC)
+#define ZSDA_AES_LBADS_INDICATE_8192 (0xD)
+#define ZSDA_AES_LBADS_INDICATE_INVALID (0xff)
+
+enum zsda_sym_chain_order {
+ ZSDA_SYM_CHAIN_ONLY_CIPHER,
+ ZSDA_SYM_CHAIN_ONLY_AUTH,
+ ZSDA_SYM_CHAIN_CIPHER_AUTH,
+ ZSDA_SYM_CHAIN_AUTH_CIPHER,
+ ZSDA_SYM_CHAIN_NOT_SUPPORTED
+};
+
+struct __rte_cache_aligned zsda_sym_session {
+ enum zsda_sym_chain_order chain_order;
+
+ /* Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation op;
+ enum rte_crypto_cipher_algorithm algo;
+ struct {
+ uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+ size_t length;
+ } key_encry;
+ struct {
+ uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+ size_t length;
+ } key_decry;
+ struct {
+ uint32_t offset;
+ size_t length;
+ } iv;
+
+ uint32_t dataunit_len;
+ uint8_t lbads;
+ } cipher;
+
+ struct {
+ enum rte_crypto_auth_operation op;
+ /* Auth operation */
+ enum rte_crypto_auth_algorithm algo;
+ /* Auth algorithm */
+ uint16_t digest_length;
+ } auth;
+
+ bool cipher_first;
+};
+
+
+int zsda_crypto_set_session_parameters(void *sess_priv,
+ struct rte_crypto_sym_xform *xform);
+
+#endif /* _ZSDA_SYM_SESSION_H_ */
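zsda_crypto_set_session_parameters() above is reached through the standard session API. A minimal sketch of creating an AES-256-XTS session that exercises the cipher path is shown below; it assumes the DPDK 22.11+ API where sessions are opaque pointers, and create_xts_session, dev_id, sess_pool and the 64-byte key1||key2 buffer are illustrative assumptions, not part of the patch.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Illustrative helper: create an AES-256-XTS encrypt session with a 4096-byte data unit. */
static void *
create_xts_session(uint8_t dev_id, struct rte_mempool *sess_pool, uint8_t key[64])
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_XTS,
			.key = { .data = key, .length = 64 },
			/* IV stored in the op private area, right after the sym op */
			.iv = { .offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 16 },
			.dataunit_len = 4096,	/* maps to LBADS indicate 0xC above */
		},
	};

	return rte_cryptodev_sym_session_create(dev_id, &xform, sess_pool);
}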