From patchwork Tue Apr 10 13:34:05 2018
X-Patchwork-Submitter: Xueming Li <xuemingl@mellanox.com>
X-Patchwork-Id: 37790
X-Patchwork-Delegate: shahafs@mellanox.com
From: Xueming Li <xuemingl@mellanox.com>
To: Nelio Laranjeiro, Shahaf Shuler
Cc: Xueming Li, dev@dpdk.org
Date: Tue, 10 Apr 2018 21:34:05 +0800
Message-Id: <20180410133415.189905-6-xuemingl@mellanox.com>
X-Mailer: git-send-email 2.13.3
In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com>
References: <20180410133415.189905-1-xuemingl@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads

This patch supports tunnel inner checksum offloads. When a tunnel flow is
created and a tunnel packet type (RTE_PTYPE_TUNNEL_xxx) is identified,
PKT_RX_IP_CKSUM_XXX and PKT_RX_L4_CKSUM_XXX represent the checksum result
of the inner headers; the outer L3 and L4 header checksums are always
valid once the tunnel is identified. If no tunnel is identified,
PKT_RX_IP_CKSUM_XXX and PKT_RX_L4_CKSUM_XXX represent the checksum result
of the outer L3 and L4 headers.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
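Note (illustration only, not part of the commit): a minimal sketch of how
an application receiving on this PMD could interpret the flags under the
semantics described above. The helper name check_rx_csum() is hypothetical.

#include <stdio.h>
#include <rte_mbuf.h>

static void
check_rx_csum(const struct rte_mbuf *m)
{
	/* With a tunnel ptype, the checksum flags describe the inner
	 * headers; the outer L3/L4 checksums are implicitly valid. */
	const char *which =
		RTE_ETH_IS_TUNNEL_PKT(m->packet_type) ? "inner" : "outer";

	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_GOOD)
		printf("%s L3 checksum good\n", which);
	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_GOOD)
		printf("%s L4 checksum good\n", which);
}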
 drivers/net/mlx5/mlx5_flow.c |  7 +++++--
 drivers/net/mlx5/mlx5_rxq.c  |  2 --
 drivers/net/mlx5/mlx5_rxtx.c | 18 ++++--------------
 drivers/net/mlx5/mlx5_rxtx.h |  1 -
 4 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 65d7a9b62..b3ad6dc85 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -829,6 +829,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
 /**
  * Validate items.
  *
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param[in] items
  *   Pattern specification (list terminated by the END pattern item).
  * @param[out] error
@@ -840,7 +842,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
+mlx5_flow_convert_items_validate(struct rte_eth_dev *dev __rte_unused,
+				 const struct rte_flow_item items[],
 				 struct rte_flow_error *error,
 				 struct mlx5_flow_parse *parser)
 {
@@ -1146,7 +1149,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
 	ret = mlx5_flow_convert_actions(dev, actions, error, parser);
 	if (ret)
 		return ret;
-	ret = mlx5_flow_convert_items_validate(items, error, parser);
+	ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
 	if (ret)
 		return ret;
 	mlx5_flow_convert_finalise(parser);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 351acfc0f..073732e16 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1045,8 +1045,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
-	tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
-				priv->config.tunnel_en);
 	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
 	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index d061dfc8a..285b2dbf0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -41,7 +41,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash);
 
 static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
 
 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
@@ -1728,8 +1728,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 /**
  * Translate RX completion flags to offload flags.
  *
- * @param[in] rxq
- *   Pointer to RX queue structure.
  * @param[in] cqe
  *   Pointer to CQE.
  *
@@ -1737,7 +1735,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
@@ -1749,14 +1747,6 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 		TRANSPOSE(flags,
 			  MLX5_CQE_RX_L4_HDR_VALID,
 			  PKT_RX_L4_CKSUM_GOOD);
-	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
-		ol_flags |=
-			TRANSPOSE(flags,
-				  MLX5_CQE_RX_L3_HDR_VALID,
-				  PKT_RX_IP_CKSUM_GOOD) |
-			TRANSPOSE(flags,
-				  MLX5_CQE_RX_L4_HDR_VALID,
-				  PKT_RX_L4_CKSUM_GOOD);
 	return ol_flags;
 }
 
@@ -1855,8 +1845,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				mlx5_flow_mark_get(mark);
 			}
 		}
-		if (rxq->csum | rxq->csum_l2tun)
-			pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
+		if (rxq->csum)
+			pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
 		if (rxq->vlan_strip &&
 		    (cqe->hdr_type_etc &
 		     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 6866f6818..d35605b55 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -77,7 +77,6 @@ struct rxq_zip {
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
-	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
 	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
 	unsigned int crc_present:1; /* CRC must be subtracted. */
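For context (illustration only, not part of this patch): the rxq.csum bit
toggled in mlx5_rxq_new() above comes from the port/queue offload
configuration. A minimal sketch, assuming the DPDK 18.02-era offload API;
the helper name enable_rx_csum() is hypothetical and capability checks
against dev_info.rx_offload_capa are omitted.

#include <rte_ethdev.h>

/* Request RX checksum offload on a port so that mlx5_rxq_new() sets
 * rxq.csum for its queues. */
static int
enable_rx_csum(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			/* Use the per-port offloads field (new-style API). */
			.ignore_offload_bitfield = 1,
			.offloads = DEV_RX_OFFLOAD_CHECKSUM,
		},
	};

	/* One RX and one TX queue, for brevity. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}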