From patchwork Wed Mar 13 09:03:40 2024
X-Patchwork-Submitter: Wei Hu
X-Patchwork-Id: 138307
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Wei Hu
To: ferruh.yigit@amd.com, andrew.rybchenko@oktetlabs.ru, thomas@monjalon.net,
	Long Li
Cc: dev@dpdk.org, Wei Hu
Subject: [PATCH v3 1/1] net/mana: add vlan tagging support
Date: Wed, 13 Mar 2024 09:03:40 +0000
Message-Id: <20240313090341.373037-1-weh@linux.microsoft.com>

On the tx path, use LONG_PACKET_FORMAT if a vlan tag is present. On the
rx path, extract the vlan id from the oob, store it in the mbuf and set
the vlan flags in the mbuf.

Signed-off-by: Wei Hu
Acked-by: Long Li
---
v3:
- Adjust the position of the pkt_idx increment so it is executed even
  when adding the vlan header fails.

v2:
- Use the existing vlan tag processing macros.
- Add the vlan header back on the receive path if the vlan_strip flag
  is not set.
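Note (not part of the patch): a minimal sketch of how an application could
exercise the two new offloads once this change is in, using only standard
ethdev/mbuf APIs. Port, queue and mempool setup are assumed to happen
elsewhere; port_id and the helper names below are hypothetical.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Enable vlan strip on rx and vlan insert on tx for one port. */
static int
enable_vlan_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP },
		.txmode = { .offloads = RTE_ETH_TX_OFFLOAD_VLAN_INSERT },
	};

	/* One rx and one tx queue; queue setup itself is not shown. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

/* Ask the PMD to insert a vlan tag on transmit. */
static void
tag_for_tx(struct rte_mbuf *m, uint16_t vlan_id)
{
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
	m->vlan_tci = vlan_id;	/* vlan id sits in the low 12 bits of the TCI */
}

/* Check a received mbuf for a tag stripped by the PMD. */
static void
check_rx_vlan(const struct rte_mbuf *m)
{
	if (m->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
		printf("rx vlan id %u\n", (unsigned int)RTE_VLAN_TCI_ID(m->vlan_tci));
}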
 drivers/net/mana/mana.c |  3 +++
 drivers/net/mana/mana.h |  4 ++++
 drivers/net/mana/rx.c   | 22 ++++++++++++++++++----
 drivers/net/mana/tx.c   | 21 ++++++++++++++++++---
 4 files changed, 43 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 2df2461d2f..68c625258e 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -94,6 +94,9 @@ mana_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	priv->vlan_strip = !!(dev_conf->rxmode.offloads &
+			      RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+
 	priv->num_queues = dev->data->nb_rx_queues;
 
 	manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 3626925871..37f654f0e6 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -21,10 +21,12 @@ struct mana_shared_data {
 #define MANA_MAX_MAC_ADDR 1
 
 #define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
+		RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
 		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
 		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
 #define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
 		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
@@ -345,6 +347,8 @@ struct mana_priv {
 	/* IB device port */
 	uint8_t dev_port;
 
+	uint8_t vlan_strip;
+
 	struct ibv_context *ib_ctx;
 	struct ibv_pd *ib_pd;
 	struct ibv_pd *ib_parent_pd;
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 16e647baf5..0c26702b73 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -532,10 +532,6 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			mbuf->hash.rss =
 				oob->packet_info[pkt_idx].packet_hash;
 		}
-		pkts[pkt_received++] = mbuf;
-		rxq->stats.packets++;
-		rxq->stats.bytes += mbuf->data_len;
-
 		pkt_idx++;
 		/* Move on the next completion if all packets are processed */
 		if (pkt_idx >= RX_COM_OOB_NUM_PACKETINFO_SEGMENTS) {
@@ -543,6 +539,24 @@
 			i++;
 		}
 
+		if (oob->rx_vlan_tag_present) {
+			mbuf->ol_flags |=
+				RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+			mbuf->vlan_tci = oob->rx_vlan_id;
+
+			if (!priv->vlan_strip && rte_vlan_insert(&mbuf)) {
+				DRV_LOG(ERR, "vlan insert failed");
+				rxq->stats.errors++;
+				rte_pktmbuf_free(mbuf);
+
+				goto drop;
+			}
+		}
+
+		pkts[pkt_received++] = mbuf;
+		rxq->stats.packets++;
+		rxq->stats.bytes += mbuf->data_len;
+drop:
 		rxq->desc_ring_tail++;
 
 		if (rxq->desc_ring_tail >= rxq->num_desc)
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 58c4a1d976..272a28bcba 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -254,7 +254,18 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/* Fill in the oob */
-		tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+		if (m_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
+			tx_oob.short_oob.packet_format = LONG_PACKET_FORMAT;
+			tx_oob.long_oob.inject_vlan_prior_tag = 1;
+			tx_oob.long_oob.priority_code_point =
+				RTE_VLAN_TCI_PRI(m_pkt->vlan_tci);
+			tx_oob.long_oob.drop_eligible_indicator =
+				RTE_VLAN_TCI_DEI(m_pkt->vlan_tci);
+			tx_oob.long_oob.vlan_identifier =
+				RTE_VLAN_TCI_ID(m_pkt->vlan_tci);
+		} else {
+			tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+		}
 		tx_oob.short_oob.tx_is_outer_ipv4 =
 			m_pkt->ol_flags & RTE_MBUF_F_TX_IPV4 ? 1 : 0;
 		tx_oob.short_oob.tx_is_outer_ipv6 =
@@ -409,8 +420,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		work_req.sgl = sgl.gdma_sgl;
 		work_req.num_sgl_elements = m_pkt->nb_segs;
-		work_req.inline_oob_size_in_bytes =
-			sizeof(struct transmit_short_oob_v2);
+		if (tx_oob.short_oob.packet_format == SHORT_PACKET_FORMAT)
+			work_req.inline_oob_size_in_bytes =
+				sizeof(struct transmit_short_oob_v2);
+		else
+			work_req.inline_oob_size_in_bytes =
+				sizeof(struct transmit_oob_v2);
 		work_req.inline_oob_data = &tx_oob;
 		work_req.flags = 0;
 		work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
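
Reviewer note, not part of the patch: the tx hunk above splits mbuf->vlan_tci
with the rte_ether.h helpers, which follow the standard 802.1Q TCI layout
(bits 15-13 PCP, bit 12 DEI, bits 11-0 vlan id). A worked example of that
decomposition, with a hypothetical function name:

#include <stdint.h>
#include <rte_ether.h>

static void
tci_layout_example(void)
{
	uint16_t vlan_tci = 0x6064;	/* PCP 3, DEI 0, vlan id 100 */

	uint16_t pcp = RTE_VLAN_TCI_PRI(vlan_tci);	/* bits 15-13 -> 3 */
	uint16_t dei = RTE_VLAN_TCI_DEI(vlan_tci);	/* bit 12     -> 0 */
	uint16_t vid = RTE_VLAN_TCI_ID(vlan_tci);	/* bits 11-0  -> 0x064 = 100 */

	(void)pcp;
	(void)dei;
	(void)vid;
}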