From patchwork Tue Jul 17 17:55:49 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Fiona Trahe X-Patchwork-Id: 43169 X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E615C2BD3; Tue, 17 Jul 2018 19:56:13 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by dpdk.org (Postfix) with ESMTP id 4807F160 for ; Tue, 17 Jul 2018 19:56:11 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 17 Jul 2018 10:56:09 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.51,366,1526367600"; d="scan'208";a="55071824" Received: from sivswdev01.ir.intel.com (HELO localhost.localdomain) ([10.237.217.45]) by fmsmga007.fm.intel.com with ESMTP; 17 Jul 2018 10:55:54 -0700 From: Fiona Trahe To: dev@dpdk.org Cc: pablo.de.lara.guarch@intel.com, fiona.trahe@intel.com, tomaszx.jozwiak@intel.com Date: Tue, 17 Jul 2018 18:55:49 +0100 Message-Id: <1531850150-21767-1-git-send-email-fiona.trahe@intel.com> X-Mailer: git-send-email 1.7.0.7 Subject: [dpdk-dev] [PATCH 1/2] common/qat: add sgl header X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch refactors the sgl struct so it includes a flexible array of flat buffers as sym and compress PMDs can have different size sgls. 
Signed-off-by: Tomasz Jozwiak Signed-off-by: Fiona Trahe --- drivers/common/qat/qat_common.c | 53 ++++++++++++++++++++++++++++++----------- drivers/common/qat/qat_common.h | 23 ++++++++++-------- drivers/crypto/qat/qat_sym.c | 12 ++++++---- drivers/crypto/qat/qat_sym.h | 14 +++++++++-- 4 files changed, 71 insertions(+), 31 deletions(-) diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c index c206d3b..c25372d 100644 --- a/drivers/common/qat/qat_common.c +++ b/drivers/common/qat/qat_common.c @@ -8,40 +8,53 @@ int qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start, - struct qat_sgl *list, uint32_t data_len) + void *list_in, uint32_t data_len, + const int32_t max_segs) { int nr = 1; - - uint32_t buf_len = rte_pktmbuf_iova(buf) - - buf_start + rte_pktmbuf_data_len(buf); + struct qat_sgl *list = (struct qat_sgl *)list_in; + /* buf_start allows the first buffer to start at an address before or + * after the mbuf data start. It's used to either optimally align the + * dma to 64 or to start dma from an offset. 
+ */ + uint32_t buf_len; + uint32_t first_buf_len = rte_pktmbuf_data_len(buf) + + (rte_pktmbuf_mtophys(buf) - buf_start); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + uint8_t *virt_addr[max_segs]; + virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) + + (rte_pktmbuf_mtophys(buf) - buf_start); +#endif list->buffers[0].addr = buf_start; list->buffers[0].resrvd = 0; - list->buffers[0].len = buf_len; + list->buffers[0].len = first_buf_len; - if (data_len <= buf_len) { + if (data_len <= first_buf_len) { list->num_bufs = nr; list->buffers[0].len = data_len; - return 0; + goto sgl_end; } buf = buf->next; + buf_len = first_buf_len; while (buf) { - if (unlikely(nr == QAT_SGL_MAX_NUMBER)) { - QAT_LOG(ERR, - "QAT PMD exceeded size of QAT SGL entry(%u)", - QAT_SGL_MAX_NUMBER); + if (unlikely(nr == max_segs)) { + QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)", + max_segs); return -EINVAL; } list->buffers[nr].len = rte_pktmbuf_data_len(buf); list->buffers[nr].resrvd = 0; - list->buffers[nr].addr = rte_pktmbuf_iova(buf); - + list->buffers[nr].addr = rte_pktmbuf_mtophys(buf); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*); +#endif buf_len += list->buffers[nr].len; buf = buf->next; - if (buf_len > data_len) { + if (buf_len >= data_len) { list->buffers[nr].len -= buf_len - data_len; buf = NULL; @@ -50,6 +63,18 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start, } list->num_bufs = nr; +sgl_end: +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs); + for (uint8_t i = 0; i < list->num_bufs; i++) { + QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012lx", + i, list->buffers[i].len, + list->buffers[i].addr); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL", + virt_addr[i], list->buffers[i].len); + } +#endif + return 0; } diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h index db85d54..e6da7fb 100644 --- a/drivers/common/qat/qat_common.h +++ 
b/drivers/common/qat/qat_common.h @@ -10,11 +10,6 @@ /**< Intel(R) QAT device name for PCI registration */ #define QAT_PCI_NAME qat -/* - * Maximum number of SGL entries - */ -#define QAT_SGL_MAX_NUMBER 16 - #define QAT_64_BTYE_ALIGN_MASK (~0x3f) /* Intel(R) QuickAssist Technology device generation is enumerated @@ -31,6 +26,7 @@ enum qat_service_type { QAT_SERVICE_COMPRESSION, QAT_SERVICE_INVALID }; + #define QAT_MAX_SERVICES (QAT_SERVICE_INVALID) /**< Common struct for scatter-gather list operations */ @@ -40,11 +36,17 @@ struct qat_flat_buf { uint64_t addr; } __rte_packed; +#define qat_sgl_hdr struct { \ + uint64_t resrvd; \ + uint32_t num_bufs; \ + uint32_t num_mapped_bufs; \ +} + +__extension__ struct qat_sgl { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; - struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER]; + qat_sgl_hdr; + /* flexible array of flat buffers*/ + struct qat_flat_buf buffers[0]; } __rte_packed __rte_cache_aligned; /** Common, i.e. not service-specific, statistics */ @@ -64,7 +66,8 @@ struct qat_pci_device; int qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start, - struct qat_sgl *list, uint32_t data_len); + void *list_in, uint32_t data_len, + const int32_t max_segs); void qat_stats_get(struct qat_pci_device *dev, struct qat_common_stats *stats, diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c index 4ed7d95..8273968 100644 --- a/drivers/crypto/qat/qat_sym.c +++ b/drivers/crypto/qat/qat_sym.c @@ -495,8 +495,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, QAT_COMN_PTR_TYPE_SGL); ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start, - &cookie->qat_sgl_src, - qat_req->comn_mid.src_length); + &cookie->qat_sgl_src, + qat_req->comn_mid.src_length, + QAT_SYM_SGL_MAX_NUMBER); if (unlikely(ret)) { QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); @@ -509,9 +510,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, 
cookie->qat_sgl_src_phys_addr; else { ret = qat_sgl_fill_array(op->sym->m_dst, - dst_buf_start, - &cookie->qat_sgl_dst, - qat_req->comn_mid.dst_length); + dst_buf_start, + &cookie->qat_sgl_dst, + qat_req->comn_mid.dst_length, + QAT_SYM_SGL_MAX_NUMBER); if (unlikely(ret)) { QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array"); diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h index e4e1ae8..bc6426c 100644 --- a/drivers/crypto/qat/qat_sym.h +++ b/drivers/crypto/qat/qat_sym.h @@ -21,11 +21,21 @@ */ #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ +/* + * Maximum number of SGL entries + */ +#define QAT_SYM_SGL_MAX_NUMBER 16 + struct qat_sym_session; +struct qat_sym_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER]; +} __rte_packed __rte_cache_aligned; + struct qat_sym_op_cookie { - struct qat_sgl qat_sgl_src; - struct qat_sgl qat_sgl_dst; + struct qat_sym_sgl qat_sgl_src; + struct qat_sym_sgl qat_sgl_dst; phys_addr_t qat_sgl_src_phys_addr; phys_addr_t qat_sgl_dst_phys_addr; }; From patchwork Tue Jul 17 17:55:50 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Fiona Trahe X-Patchwork-Id: 43170 X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C4E762BE2; Tue, 17 Jul 2018 19:56:16 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by dpdk.org (Postfix) with ESMTP id 241762BDB for ; Tue, 17 Jul 2018 19:56:12 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 17 Jul 2018 10:56:09 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.51,366,1526367600"; d="scan'208";a="55071827" Received: from 
sivswdev01.ir.intel.com (HELO localhost.localdomain) ([10.237.217.45]) by fmsmga007.fm.intel.com with ESMTP; 17 Jul 2018 10:56:02 -0700 From: Fiona Trahe To: dev@dpdk.org Cc: pablo.de.lara.guarch@intel.com, fiona.trahe@intel.com, tomaszx.jozwiak@intel.com Date: Tue, 17 Jul 2018 18:55:50 +0100 Message-Id: <1531850150-21767-2-git-send-email-fiona.trahe@intel.com> X-Mailer: git-send-email 1.7.0.7 In-Reply-To: <1531850150-21767-1-git-send-email-fiona.trahe@intel.com> References: <1531850150-21767-1-git-send-email-fiona.trahe@intel.com> Subject: [dpdk-dev] [PATCH 2/2] compression/qat: add sgl feature X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds the SGL feature to the QAT compression PMD. Signed-off-by: Tomasz Jozwiak Signed-off-by: Fiona Trahe --- config/common_base | 1 + config/rte_config.h | 1 + doc/guides/compressdevs/features/qat.ini | 3 +++ doc/guides/compressdevs/qat_comp.rst | 2 -- drivers/compress/qat/qat_comp.c | 41 ++++++++++++++++++++++++++++---- drivers/compress/qat/qat_comp.h | 9 +++++++ drivers/compress/qat/qat_comp_pmd.c | 25 ++++++++++++++++++- 7 files changed, 75 insertions(+), 7 deletions(-) diff --git a/config/common_base b/config/common_base index a061c21..6d82b91 100644 --- a/config/common_base +++ b/config/common_base @@ -499,6 +499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n # Max. number of QuickAssist devices, which can be detected and attached # CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48 +CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16 # # Compile PMD for virtio crypto devices diff --git a/config/rte_config.h b/config/rte_config.h index 28f04b4..a8e4797 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -89,6 +89,7 @@ /* QuickAssist device */ /* Max.
number of QuickAssist devices which can be attached */ #define RTE_PMD_QAT_MAX_PCI_DEVICES 48 +#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16 /* virtio crypto defines */ #define RTE_MAX_VIRTIO_CRYPTO 32 diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini index 12bfb21..5cd4524 100644 --- a/doc/guides/compressdevs/features/qat.ini +++ b/doc/guides/compressdevs/features/qat.ini @@ -5,6 +5,9 @@ ; [Features] HW Accelerated = Y +OOP SGL In SGL Out = Y +OOP SGL In LB Out = Y +OOP LB In SGL Out = Y Deflate = Y Adler32 = Y Crc32 = Y diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst index 167f816..8b1270b 100644 --- a/doc/guides/compressdevs/qat_comp.rst +++ b/doc/guides/compressdevs/qat_comp.rst @@ -35,8 +35,6 @@ Checksum generation: Limitations ----------- -* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations. - * Compressdev level 0, no compression, is not supported. * Dynamic Huffman encoding is not yet supported. 
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c index e8019eb..cbf7614 100644 --- a/drivers/compress/qat/qat_comp.c +++ b/drivers/compress/qat/qat_comp.c @@ -21,10 +21,12 @@ int qat_comp_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie __rte_unused, + void *op_cookie, enum qat_device_gen qat_dev_gen __rte_unused) { struct rte_comp_op *op = in_op; + struct qat_comp_op_cookie *cookie = + (struct qat_comp_op_cookie *)op_cookie; struct qat_comp_xform *qat_xform = op->private_xform; const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl; struct icp_qat_fw_comp_req *comp_req = @@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg, comp_req->comp_pars.comp_len = op->src.length; comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst); - /* sgl */ if (op->m_src->next != NULL || op->m_dst->next != NULL) { - QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather"); - return -EINVAL; + /* sgl */ + int ret = 0; + + ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + ret = qat_sgl_fill_array(op->m_src, + rte_pktmbuf_mtophys_offset(op->m_src, + op->src.offset), + &cookie->qat_sgl_src, + op->src.length, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + ret = qat_sgl_fill_array(op->m_dst, + rte_pktmbuf_mtophys_offset(op->m_dst, + op->dst.offset), + &cookie->qat_sgl_dst, + comp_req->comp_pars.out_buffer_sz, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + comp_req->comn_mid.src_data_addr = + cookie->qat_sgl_src_phys_addr; + comp_req->comn_mid.dest_data_addr = + cookie->qat_sgl_dst_phys_addr; + comp_req->comn_mid.src_length = 0; + comp_req->comn_mid.dst_length = 0; } else { + /* flat aka linear buffer */ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, QAT_COMN_PTR_TYPE_FLAT); 
comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src); diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h index 9e6861b..8d315ef 100644 --- a/drivers/compress/qat/qat_comp.h +++ b/drivers/compress/qat/qat_comp.h @@ -24,7 +24,16 @@ enum qat_comp_request_type { REQ_COMP_END }; +struct qat_comp_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS]; +} __rte_packed __rte_cache_aligned; + struct qat_comp_op_cookie { + struct qat_comp_sgl qat_sgl_src; + struct qat_comp_sgl qat_sgl_dst; + phys_addr_t qat_sgl_src_phys_addr; + phys_addr_t qat_sgl_dst_phys_addr; }; struct qat_comp_xform { diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c index 764c053..b89975f 100644 --- a/drivers/compress/qat/qat_comp_pmd.c +++ b/drivers/compress/qat/qat_comp_pmd.c @@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = { RTE_COMP_FF_ADLER32_CHECKSUM | RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | RTE_COMP_FF_SHAREABLE_PRIV_XFORM | - RTE_COMP_FF_HUFFMAN_FIXED, + RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | + RTE_COMP_FF_OOP_SGL_IN_LB_OUT | + RTE_COMP_FF_OOP_LB_IN_SGL_OUT, .window_size = {.min = 15, .max = 15, .increment = 0} }, {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; @@ -71,7 +74,9 @@ static int qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, uint32_t max_inflight_ops, int socket_id) { + struct qat_qp *qp; int ret = 0; + uint32_t i; struct qat_qp_config qat_qp_conf; struct qat_qp **qp_addr = @@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id] = *qp_addr; + qp = (struct qat_qp *)*qp_addr; + + for (i = 0; i < qp->nb_descriptors; i++) { + + struct qat_comp_op_cookie *cookie = + qp->op_cookies[i]; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_src); + + 
cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_dst); + } + return ret; }