From patchwork Sun Apr 8 12:41:20 2018
X-Patchwork-Submitter: Xueming Li
X-Patchwork-Id: 37523
X-Patchwork-Delegate: shahafs@mellanox.com
From: Xueming Li
To: Yongseok Koh, Shahaf Shuler
Cc: Xueming Li, dev@dpdk.org
Date: Sun, 8 Apr 2018 20:41:20 +0800
Message-Id: <20180408124121.110975-3-xuemingl@mellanox.com>
X-Mailer: git-send-email 2.13.3
In-Reply-To: <20180408124121.110975-1-xuemingl@mellanox.com>
References: <20180408124121.110975-1-xuemingl@mellanox.com>
Subject: [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading

This commit adds support for generic tunnel TSO and checksum offload.
The PMD computes the inner/outer header offsets from the mbuf fields;
the hardware then performs the checksum and TSO calculations based on
those offsets and types.
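
For illustration only (not part of this patch): a minimal sketch of how an
application could request the new generic tunnel offload for a UDP-tunneled
TCP packet, assuming the 18.05-era mbuf offload API. The header sizes and the
prepare_generic_tunnel_tso() helper are hypothetical, and the port is assumed
to have been configured with DEV_TX_OFFLOAD_UDP_TNL_TSO in txmode.offloads.

    #include <rte_mbuf.h>

    /* Hypothetical helper: mark a UDP-tunneled TCP packet for generic
     * tunnel TSO and checksum offload. */
    static void
    prepare_generic_tunnel_tso(struct rte_mbuf *m, uint16_t mss)
    {
            m->outer_l2_len = 14;    /* Outer Ethernet. */
            m->outer_l3_len = 20;    /* Outer IPv4. */
            m->l2_len = 8 + 8 + 14;  /* Outer UDP + tunnel hdr + inner Ethernet. */
            m->l3_len = 20;          /* Inner IPv4. */
            m->l4_len = 20;          /* Inner TCP. */
            m->tso_segsz = mss;
            /* PKT_TX_TUNNEL_UDP marks a generic UDP tunnel: the PMD relies on
             * the offsets above (SW parser) instead of a recognized tunnel
             * type such as VXLAN or GRE. */
            m->ol_flags |= PKT_TX_TUNNEL_UDP |
                           PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                           PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                           PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
    }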
Signed-off-by: Xueming Li
Acked-by: Yongseok Koh
---
 drivers/net/mlx5/Makefile             |   5 ++
 drivers/net/mlx5/mlx5.c               |  14 +++-
 drivers/net/mlx5/mlx5.h               |   1 +
 drivers/net/mlx5/mlx5_ethdev.c        |   5 +-
 drivers/net/mlx5/mlx5_prm.h           |  24 +++++++
 drivers/net/mlx5/mlx5_rxtx.c          | 122 ++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.h          | 100 ++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_rxtx_vec.c      |   9 +--
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |   2 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |   2 +-
 drivers/net/mlx5/mlx5_txq.c           |  10 ++-
 11 files changed, 234 insertions(+), 60 deletions(-)

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 201f6f06a..cc128ef69 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -135,6 +135,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
                enum IBV_WQ_FLAG_RX_END_PADDING \
                $(AUTOCONF_OUTPUT)
        $Q sh -- '$<' '$@' \
+               HAVE_IBV_MLX5_MOD_SWP \
+               infiniband/mlx5dv.h \
+               enum MLX5DV_CONTEXT_MASK_SWP \
+               $(AUTOCONF_OUTPUT)
+       $Q sh -- '$<' '$@' \
                HAVE_IBV_MLX5_MOD_MPW \
                infiniband/mlx5dv.h \
                enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 7d58d66bb..d886ddd4f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -600,6 +600,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        unsigned int mps;
        unsigned int cqe_comp;
        unsigned int tunnel_en = 0;
+       unsigned int swp = 0;
        int idx;
        int i;
        struct mlx5dv_context attrs_out = {0};
@@ -667,6 +668,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        }
        ibv_dev = list[i];
        DRV_LOG(DEBUG, "device opened");
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
        /*
         * Multi-packet send is supported by ConnectX-4 Lx PF as well
         * as all ConnectX-5 devices.
@@ -687,6 +691,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                DRV_LOG(DEBUG, "MPW isn't supported");
                mps = MLX5_MPW_DISABLED;
        }
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+               swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
+       DRV_LOG(DEBUG, "SWP support: %u", swp);
+#endif
        if (RTE_CACHE_LINE_SIZE == 128 &&
            !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
                cqe_comp = 0;
@@ -733,6 +742,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                        .txq_inline = MLX5_ARG_UNSET,
                        .txqs_inline = MLX5_ARG_UNSET,
                        .inline_max_packet_sz = MLX5_ARG_UNSET,
+                       .swp = !!swp,
                };

                len = snprintf(name, sizeof(name), PCI_PRI_FMT,
@@ -1182,8 +1192,10 @@ RTE_INIT(rte_mlx5_pmd_init);
 static void
 rte_mlx5_pmd_init(void)
 {
-       /* Build the static table for ptype conversion. */
+       /* Build the static tables for Verbs conversion. */
        mlx5_set_ptype_table();
+       mlx5_set_cksum_table();
+       mlx5_set_swp_types_table();
        /*
         * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
         * huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index faacfd9d6..b5e5e0b6c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -87,6 +87,7 @@ struct mlx5_dev_config {
        unsigned int tx_vec_en:1; /* Tx vector is enabled. */
        unsigned int rx_vec_en:1; /* Rx vector is enabled. */
        unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+       unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
        unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
        unsigned int ind_table_max_size; /* Maximum indirection table size. */
        int txq_inline; /* Maximum packet size for inlining. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index b6f5101cf..aecfdc1d4 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1063,11 +1063,14 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
        int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
                                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                    DEV_TX_OFFLOAD_GRE_TNL_TSO));
+       int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+                                   DEV_TX_OFFLOAD_UDP_TNL_TSO |
+                                   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
        int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

        assert(priv != NULL);
        /* Select appropriate TX function. */
-       if (vlan_insert || tso)
+       if (vlan_insert || tso || swp)
                return tx_pkt_burst;
        if (config->mps == MLX5_MPW_ENHANCED) {
                if (mlx5_check_vec_tx_support(dev) > 0) {
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9eb9c15e1..2129d74a3 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -107,6 +107,30 @@
 /* Inner L4 checksum offload (Tunneled packets only). */
 #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)

+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
 /* Is flow mark valid. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index a9de69131..d1dc7d327 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -47,6 +47,9 @@ uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
        [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };

+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
 /**
  * Build a table to translate Rx completion flags to packet type.
  *
@@ -203,6 +206,74 @@ mlx5_set_ptype_table(void)
 }

 /**
+ * Build a table to translate packet to checksum type of Verbs.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+       unsigned int i;
+       uint8_t v;
+
+       /*
+        * The index should have:
+        * bit[0] = PKT_TX_TCP_SEG
+        * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+        * bit[4] = PKT_TX_IP_CKSUM
+        * bit[8] = PKT_TX_OUTER_IP_CKSUM
+        * bit[9] = tunnel
+        */
+       for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+               v = 0;
+               if (i & (1 << 9)) {
+                       /* Tunneled packet. */
+                       if (i & (1 << 8)) /* Outer IP. */
+                               v |= MLX5_ETH_WQE_L3_CSUM;
+                       if (i & (1 << 4)) /* Inner IP. */
+                               v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+                       if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+                               v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+               } else {
+                       /* No tunnel. */
+                       if (i & (1 << 4)) /* IP. */
+                               v |= MLX5_ETH_WQE_L3_CSUM;
+                       if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+                               v |= MLX5_ETH_WQE_L4_CSUM;
+               }
+               mlx5_cksum_table[i] = v;
+       }
+}
+
+/**
+ * Build a table to translate packet type of mbuf to SWP type of Verbs.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+       unsigned int i;
+       uint8_t v;
+
+       /*
+        * The index should have:
+        * bit[0:1] = PKT_TX_L4_MASK
+        * bit[4] = PKT_TX_IPV6
+        * bit[8] = PKT_TX_OUTER_IPV6
+        * bit[9] = PKT_TX_OUTER_UDP
+        */
+       for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+               v = 0;
+               if (i & (1 << 8))
+                       v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+               if (i & (1 << 9))
+                       v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+               if (i & (1 << 4))
+                       v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+               if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+                       v |= MLX5_ETH_WQE_L4_INNER_UDP;
+               mlx5_swp_types_table[i] = v;
+       }
+}
+
+/**
  * Return the size of tailroom of WQ.
  *
  * @param txq
@@ -267,7 +338,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 static int
 inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
           uint32_t *length,
-          uint8_t *cs_flags,
           uintptr_t *addr,
           uint16_t *pkt_inline_sz,
           uint8_t **raw,
@@ -279,9 +349,8 @@ inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
                                 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
        unsigned int copy_b;
        uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
-       const uint8_t tunneled = txq->tunnel_en &&
-                                (buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-                                                  PKT_TX_TUNNEL_VXLAN));
+       const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
+                                                   PKT_TX_TUNNEL_MASK);
        uint16_t n_wqe;

        *tso_segsz = buf->tso_segsz;
@@ -290,19 +359,15 @@ inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
                txq->stats.oerrors++;
                return -EINVAL;
        }
-       if (tunneled) {
+       if (tunneled)
                *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
-               *cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-       } else {
-               *cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-       }
-       if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+       /* First seg must contain all TSO headers. */
+       if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
+                    *tso_header_sz > DATA_LEN(buf)) {
                txq->stats.oerrors++;
                return -EINVAL;
        }
        copy_b = *tso_header_sz - *pkt_inline_sz;
-       /* First seg must contain all TSO headers. */
-       assert(copy_b <= *length);
        if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
                return -EAGAIN;
        n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
@@ -435,7 +500,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        if (unlikely(!max_wqe))
                return 0;
        do {
-               struct rte_mbuf *buf = NULL;
+               struct rte_mbuf *buf = *pkts; /* First_seg. */
                uint8_t *raw;
                volatile struct mlx5_wqe_v *wqe = NULL;
                volatile rte_v128u32_t *dseg = NULL;
@@ -447,15 +512,16 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uint16_t tso_header_sz = 0;
                uint16_t ehdr;
                uint8_t cs_flags;
-               uint64_t tso = 0;
+               uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+               uint8_t is_vlan = !!(buf->ol_flags & PKT_TX_VLAN_PKT);
+               uint32_t swp_offsets = 0;
+               uint8_t swp_types = 0;
                uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
                int ret;

-               /* first_seg */
-               buf = *pkts;
                segs_n = buf->nb_segs;
                /*
                 * Make sure there is enough room to store this packet and
@@ -490,10 +556,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (pkts_n - i > 1)
                        rte_prefetch0(
                            rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
+               txq_mbuf_to_swp(txq, buf, tso, is_vlan,
+                               (uint8_t *)&swp_offsets, &swp_types);
                raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
                /* Replace the Ethernet type by the VLAN if necessary. */
-               if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+               if (is_vlan) {
                        uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
                                                         buf->vlan_tci);
                        unsigned int len = 2 * ETHER_ADDR_LEN - 2;
@@ -516,9 +584,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        addr += pkt_inline_sz;
                }
                raw += MLX5_WQE_DWORD_SIZE;
-               tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
                if (tso) {
-                       ret = inline_tso(txq, buf, &length, &cs_flags,
+                       ret = inline_tso(txq, buf, &length,
                                         &addr, &pkt_inline_sz,
                                         &raw, &max_wqe,
                                         &tso_segsz, &tso_header_sz);
@@ -695,8 +762,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                0,
                        };
                        wqe->eseg = (rte_v128u32_t){
-                               0,
-                               cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+                               swp_offsets,
+                               cs_flags | (swp_types << 8) |
+                               (rte_cpu_to_be_16(tso_segsz) << 16),
                                0,
                                (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
                        };
@@ -709,8 +777,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                0,
                        };
                        wqe->eseg = (rte_v128u32_t){
-                               0,
-                               cs_flags,
+                               swp_offsets,
+                               cs_flags | (swp_types << 8),
                                0,
                                (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
                        };
@@ -882,7 +950,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                }
                max_elts -= segs_n;
                --pkts_n;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                assert(length);
@@ -1114,7 +1182,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 * iteration.
                 */
                max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if packet differs. */
@@ -1391,7 +1459,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                /* Make sure there is enough room to store this packet. */
                if (max_elts - j == 0)
                        break;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Retrieve packet information. */
                length = PKT_LEN(buf);
                /* Start new session if:
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f5af43735..2bcf316ed 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -168,6 +168,7 @@ struct mlx5_txq_data {
        uint16_t tso_en:1; /* When set hardware TSO is enabled. */
        uint16_t tunnel_en:1;
        /* When set TX offload for tunneled packets are supported. */
+       uint16_t swp_en:1; /* Whether SW parser is enabled. */
        uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
        uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
        uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
@@ -280,8 +281,12 @@ uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 /* mlx5_rxtx.c */

 extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];

 void mlx5_set_ptype_table(void);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
 uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n);
 uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
@@ -614,38 +619,89 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 }

 /**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
  *
  * @param txq_data
  *   Pointer to the Tx queue.
  * @param buf
  *   Pointer to the mbuf.
+ * @param tso
+ *   TSO offloads enabled.
+ * @param vlan
+ *   VLAN offloads enabled.
+ * @param offsets
+ *   Pointer to the SWP header offsets.
+ * @param swp_types
+ *   Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+               uint8_t tso, uint64_t vlan,
+               uint8_t *offsets, uint8_t *swp_types)
+{
+       uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+       uint16_t idx;
+       uint16_t off;
+       const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+                                      PKT_TX_OUTER_IPV6;
+
+       if (likely(!tunnel || !txq->swp_en ||
+                  (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
+               return;
+       /*
+        * The index should have:
+        * bit[0:1] = PKT_TX_L4_MASK
+        * bit[4] = PKT_TX_IPV6
+        * bit[8] = PKT_TX_OUTER_IPV6
+        * bit[9] = PKT_TX_OUTER_UDP
+        */
+       idx = (buf->ol_flags & ol_flags_mask) >> 52;
+       if (tunnel == PKT_TX_TUNNEL_UDP)
+               idx |= 1 << 9;
+       *swp_types = mlx5_swp_types_table[idx];
+       /* swp offsets. */
+       off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
+       if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
+               offsets[1] = off >> 1;
+       off += buf->outer_l3_len; /* Outer L4 offset. */
+       if (tunnel == PKT_TX_TUNNEL_UDP)
+               offsets[0] = off >> 1;
+       off += buf->l2_len; /* Inner L3 offset. */
+       if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
+               offsets[3] = off >> 1;
+       off += buf->l3_len; /* Inner L4 offset. */
+       if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+           ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
+               offsets[2] = off >> 1;
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ *   Pointer to the mbuf.
+ *
+ * @return
+ *   Converted checksum flags.
+ */
 static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
 {
-       uint8_t cs_flags = 0;
-
-       /* Should we enable HW CKSUM offload */
-       if (buf->ol_flags &
-           (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
-            PKT_TX_OUTER_IP_CKSUM)) {
-               if (txq_data->tunnel_en &&
-                   (buf->ol_flags &
-                    (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
-                       cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-                                  MLX5_ETH_WQE_L4_INNER_CSUM;
-                       if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                               cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-               } else {
-                       cs_flags = MLX5_ETH_WQE_L3_CSUM |
-                                  MLX5_ETH_WQE_L4_CSUM;
-               }
-       }
-       return cs_flags;
+       uint32_t idx;
+       uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+       const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+                                      PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+       /*
+        * The index should have:
+        * bit[0] = PKT_TX_TCP_SEG
+        * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+        * bit[4] = PKT_TX_IP_CKSUM
+        * bit[8] = PKT_TX_OUTER_IP_CKSUM
+        * bit[9] = tunnel
+        */
+       idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
+       return mlx5_cksum_table[idx];
 }

 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 257d7b11c..24a80cf07 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -42,8 +42,6 @@
 /**
  * Count the number of packets having same ol_flags and calculate cs_flags.
  *
- * @param txq
- *   Pointer to TX queue structure.
  * @param pkts
  *   Pointer to array of packets.
  * @param pkts_n
@@ -55,8 +53,7 @@
  *   Number of packets having same ol_flags.
  */
 static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-                uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
 {
        unsigned int pos;
        const uint64_t ol_mask =
@@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
        for (pos = 1; pos < pkts_n; ++pos)
                if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
                        break;
-       *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
+       *cs_flags = txq_ol_cksum_to_cs(pkts[0]);
        return pos;
 }

@@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                        n = txq_count_contig_single_seg(&pkts[nb_tx], n);
                if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
-                       n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+                       n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
                ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
                nb_tx += ret;
                if (!ret)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index bbe1818ef..37ad768e5 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -142,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                        break;
                wqe = &((volatile struct mlx5_wqe64 *)
                        txq->wqes)[wqe_ci & wq_mask].hdr;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Title WQEBB pointer. */
                t_wqe = (uint8x16_t *)wqe;
                dseg = (uint8_t *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index c088bcb51..d531d2b10 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -144,7 +144,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
                }
                wqe = &((volatile struct mlx5_wqe64 *)
                        txq->wqes)[wqe_ci & wq_mask].hdr;
-               cs_flags = txq_ol_cksum_to_cs(txq, buf);
+               cs_flags = txq_ol_cksum_to_cs(buf);
                /* Title WQEBB pointer. */
                t_wqe = (__m128i *)wqe;
                dseg = (__m128i *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9139429be..3f3912b45 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -119,6 +119,9 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
                if (config->tso)
                        offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+               if (config->swp)
+                       offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+                                    DEV_TX_OFFLOAD_UDP_TNL_TSO);
        }
        return offloads;
 }
@@ -686,7 +689,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
        int is_empw_func = is_empw_burst_func(tx_pkt_burst);
        int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
                                               DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                              DEV_TX_OFFLOAD_GRE_TNL_TSO));
+                                              DEV_TX_OFFLOAD_GRE_TNL_TSO |
+                                              DEV_TX_OFFLOAD_IP_TNL_TSO |
+                                              DEV_TX_OFFLOAD_UDP_TNL_TSO));

        txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
                0 : config->txq_inline;
@@ -767,6 +772,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
                txq_ctrl->txq.tso_en = 1;
        }
        txq_ctrl->txq.tunnel_en = config->tunnel_en;
+       txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+                                DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+                               txq_ctrl->txq.offloads) && config->swp;
 }

 /**
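
For illustration (not part of the patch): txq_ol_cksum_to_cs() above replaces
per-packet branching with a single lookup into the 1024-entry table built at
init time. The standalone sketch below mirrors the index construction; the
PKT_TX_* values are copied from the 18.05-era rte_mbuf.h ol_flags layout, and
main() exists only to demonstrate the bit folding.

    #include <stdint.h>
    #include <stdio.h>

    /* Tx offload flag bit positions, as in the 18.05-era rte_mbuf.h. */
    #define PKT_TX_TCP_SEG        (1ULL << 50)
    #define PKT_TX_L4_MASK        (3ULL << 52)
    #define PKT_TX_TCP_CKSUM      (1ULL << 52)
    #define PKT_TX_UDP_CKSUM      (3ULL << 52)
    #define PKT_TX_IP_CKSUM       (1ULL << 54)
    #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

    /* Fold the relevant ol_flags bits into a 10-bit table index:
     * bit[0] = TCP_SEG, bit[2:3] = L4 checksum kind, bit[4] = IP_CKSUM,
     * bit[8] = OUTER_IP_CKSUM, bit[9] = any tunnel flag present. */
    static uint32_t
    cksum_table_idx(uint64_t ol_flags, int is_tunnel)
    {
            const uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
                                  PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;

            return (uint32_t)((ol_flags & mask) >> 50) |
                   ((uint32_t)!!is_tunnel << 9);
    }

    int
    main(void)
    {
            /* Tunneled packet: outer/inner IP and inner TCP checksum. */
            uint64_t f = PKT_TX_OUTER_IP_CKSUM | PKT_TX_IP_CKSUM |
                         PKT_TX_TCP_CKSUM;

            printf("index = 0x%x\n", cksum_table_idx(f, 1)); /* 0x314 */
            return 0;
    }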
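
Also for illustration: the four SWP offsets written into the WQE Ethernet
segment are expressed in 2-byte units (hence the off >> 1; headers must start
at even offsets). The sketch below reproduces the walk in txq_mbuf_to_swp()
for a UDP-tunneled packet; the header sizes are an assumed example, not taken
from the patch.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Assumed stack: Ethernet/IPv4/UDP + 8-byte tunnel header,
             * then inner Ethernet/IPv4/TCP. As in the mbuf convention,
             * l2_len covers outer L4 + tunnel header + inner Ethernet. */
            uint16_t outer_l2_len = 14, outer_l3_len = 20;
            uint16_t l2_len = 8 + 8 + 14, l3_len = 20;
            uint8_t offsets[4] = {0}; /* Written to the eseg as one word. */
            uint16_t off = outer_l2_len;    /* Outer L3 starts after L2. */

            offsets[1] = off >> 1;          /* Outer L3, 2-byte units. */
            off += outer_l3_len;            /* Outer L4 (tunnel UDP). */
            offsets[0] = off >> 1;
            off += l2_len;                  /* Inner L3. */
            offsets[3] = off >> 1;
            off += l3_len;                  /* Inner L4. */
            offsets[2] = off >> 1;
            printf("outer L3/L4 at %u/%u, inner L3/L4 at %u/%u bytes\n",
                   offsets[1] * 2, offsets[0] * 2,
                   offsets[3] * 2, offsets[2] * 2);
            return 0;
    }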