From patchwork Tue Mar 31 12:51:54 2020
X-Patchwork-Submitter: Yunjian Wang
X-Patchwork-Id: 67479
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: wangyunjian
Date: Tue, 31 Mar 2020 20:51:54 +0800
Message-ID: <1585659114-17032-1-git-send-email-wangyunjian@huawei.com>
Subject: [dpdk-dev] [PATCH v2 1/3] net/tap: fix mbuf double free when writev fails

From: Yunjian Wang

When tap_write_mbufs() returns via break, the mbuf has already been freed
without num_packets being incremented. This may lead the application to
free the same mbuf again, causing a double free. In addition, the
pmd_tx_burst() function should return the number of original packets it
actually sent, excluding TSO mbufs.
Fixes: 9396ad334672 ("net/tap: fix reported number of Tx packets")
CC: stable@dpdk.org

Signed-off-by: Yunjian Wang
---
 drivers/net/tap/rte_eth_tap.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 05470a211..4c4b6b0b2 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -521,7 +521,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
 	}
 }
 
-static inline void
+static inline int
 tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
 			struct rte_mbuf **pmbufs,
 			uint16_t *num_packets, unsigned long *num_tx_bytes)
@@ -588,7 +588,7 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
 			seg_len = rte_pktmbuf_data_len(mbuf);
 			l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
 			if (seg_len < l234_hlen)
-				break;
+				return -1;
 			/* To change checksums, work on a * copy of l2, l3
 			 * headers + l4 pseudo header
 			 */
@@ -634,10 +634,12 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
 		/* copy the tx frame data */
 		n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
 		if (n <= 0)
-			break;
+			return -1;
+
 		(*num_packets)++;
 		(*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
 	}
+	return 0;
 }
 
 /* Callback to handle sending packets from the tap interface
@@ -708,8 +710,15 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			num_mbufs = 1;
 		}
 
-		tap_write_mbufs(txq, num_mbufs, mbuf,
-				&num_packets, &num_tx_bytes);
+		ret = tap_write_mbufs(txq, num_mbufs, mbuf,
+				&num_packets, &num_tx_bytes);
+		if (ret != 0) {
+			txq->stats.errs++;
+			/* free tso mbufs */
+			for (j = 0; j < ret; j++)
+				rte_pktmbuf_free(mbuf[j]);
+			break;
+		}
 		num_tx++;
 		/* free original mbuf */
 		rte_pktmbuf_free(mbuf_in);
@@ -722,7 +731,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	txq->stats.errs += nb_pkts - num_tx;
 	txq->stats.obytes += num_tx_bytes;
 
-	return num_packets;
+	return num_tx;
 }
 
 static const char *
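For context, the mbuf ownership rule this fix relies on can be sketched from the application side. This is a minimal, hypothetical helper (app_send_burst, port_id and queue_id are illustrative names, not part of the patch): rte_eth_tx_burst() transfers ownership of the first "sent" mbufs to the PMD, and the caller frees only the rest. If the PMD had already freed an mbuf it did not report as sent, the final loop would free that mbuf a second time.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical application-side TX helper. rte_eth_tx_burst() takes
 * ownership only of the mbufs it reports as sent; the caller frees the
 * remainder. A PMD that frees an unsent mbuf itself makes the loop
 * below free that mbuf a second time.
 */
static void
app_send_burst(uint16_t port_id, uint16_t queue_id,
	       struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	/* Free only the packets the PMD did not accept. */
	for (uint16_t i = sent; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);
}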
From patchwork Tue Mar 31 12:52:20 2020
X-Patchwork-Submitter: Yunjian Wang
X-Patchwork-Id: 67481
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: wangyunjian
Date: Tue, 31 Mar 2020 20:52:20 +0800
Message-ID: <1585659140-18388-1-git-send-email-wangyunjian@huawei.com>
Subject: [dpdk-dev] [PATCH v2 2/3] net/tap: fix mbuf and mem leak during queue release

From: Yunjian Wang

For the tap PMD, the mbufs and iovecs of the Rx queues should be released
when the device is closed or removed.

Fixes: 0781f5762cfe ("net/tap: support segmented mbufs")
CC: stable@dpdk.org

Signed-off-by: Yunjian Wang
---
 drivers/net/tap/rte_eth_tap.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 4c4b6b0b2..a9ba0ca68 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1022,6 +1022,7 @@ tap_dev_close(struct rte_eth_dev *dev)
 	int i;
 	struct pmd_internals *internals = dev->data->dev_private;
 	struct pmd_process_private *process_private = dev->process_private;
+	struct rx_queue *rxq;
 
 	tap_link_set_down(dev);
 	tap_flow_flush(dev, NULL);
@@ -1029,8 +1030,13 @@
 
 	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
 		if (process_private->rxq_fds[i] != -1) {
+			rxq = &internals->rxq[i];
 			close(process_private->rxq_fds[i]);
 			process_private->rxq_fds[i] = -1;
+			rte_pktmbuf_free(rxq->pool);
+			rte_free(rxq->iovecs);
+			rxq->pool = NULL;
+			rxq->iovecs = NULL;
 		}
 		if (process_private->txq_fds[i] != -1) {
 			close(process_private->txq_fds[i]);
@@ -2399,6 +2405,7 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
 	struct rte_eth_dev *eth_dev = NULL;
 	struct pmd_internals *internals;
 	struct pmd_process_private *process_private;
+	struct rx_queue *rxq;
 	int i;
 
 	/* find the ethdev entry */
@@ -2425,8 +2432,13 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
 	}
 	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
 		if (process_private->rxq_fds[i] != -1) {
+			rxq = &internals->rxq[i];
 			close(process_private->rxq_fds[i]);
 			process_private->rxq_fds[i] = -1;
+			rte_pktmbuf_free(rxq->pool);
+			rte_free(rxq->iovecs);
+			rxq->pool = NULL;
+			rxq->iovecs = NULL;
 		}
 		if (process_private->txq_fds[i] != -1) {
 			close(process_private->txq_fds[i]);
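For context, the leak fixed here can be observed with standard mempool accounting; a minimal sketch, assuming a test setup where mb_pool is used only by this tap port (check_rxq_leak, mb_pool and port_id are illustrative names, not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical check: after stopping and closing the port, no mbufs from
 * this pool should remain in flight. Without the fix, the mbuf chains
 * still attached to the tap Rx queues (rxq->pool) are never returned,
 * so the in-use count stays above zero.
 */
static void
check_rxq_leak(struct rte_mempool *mb_pool, uint16_t port_id)
{
	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);

	printf("mbufs still in use after close: %u\n",
	       rte_mempool_in_use_count(mb_pool));
}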
From patchwork Tue Mar 31 12:52:30 2020
X-Patchwork-Submitter: Yunjian Wang
X-Patchwork-Id: 67480
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: wangyunjian
Date: Tue, 31 Mar 2020 20:52:30 +0800
Message-ID: <1585659150-16728-1-git-send-email-wangyunjian@huawei.com>
Subject: [dpdk-dev] [PATCH v2 3/3] net/tap: fix check for mbuf's nb_segs failure

From: Yunjian Wang

Currently rxq->pool is a chain of concatenated mbufs, but its nb_segs is
still 1. When sanity checks are run on this mbuf, they fail.

Fixes: 0781f5762cfe ("net/tap: support segmented mbufs")
CC: stable@dpdk.org

Signed-off-by: Yunjian Wang
---
 drivers/net/tap/rte_eth_tap.c | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index a9ba0ca68..703fcceb9 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -339,6 +339,23 @@ tap_rx_offload_get_queue_capa(void)
 	       DEV_RX_OFFLOAD_TCP_CKSUM;
 }
 
+static void
+tap_rxq_pool_free(struct rte_mbuf *pool)
+{
+	struct rte_mbuf *mbuf = pool;
+	uint16_t nb_segs = 1;
+
+	if (mbuf == NULL)
+		return;
+
+	while (mbuf->next) {
+		mbuf = mbuf->next;
+		nb_segs++;
+	}
+	pool->nb_segs = nb_segs;
+	rte_pktmbuf_free(pool);
+}
+
 /* Callback to handle the rx burst of packets to the correct interface and
  * file descriptor(s) in a multi-queue setup.
  */
@@ -389,7 +406,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto end;
 
 			seg->next = NULL;
-			rte_pktmbuf_free(mbuf);
+			tap_rxq_pool_free(mbuf);
 
 			goto end;
 		}
@@ -1033,7 +1050,7 @@ tap_dev_close(struct rte_eth_dev *dev)
 			rxq = &internals->rxq[i];
 			close(process_private->rxq_fds[i]);
 			process_private->rxq_fds[i] = -1;
-			rte_pktmbuf_free(rxq->pool);
+			tap_rxq_pool_free(rxq->pool);
 			rte_free(rxq->iovecs);
 			rxq->pool = NULL;
 			rxq->iovecs = NULL;
@@ -1072,7 +1089,7 @@ tap_rx_queue_release(void *queue)
 	if (process_private->rxq_fds[rxq->queue_id] > 0) {
 		close(process_private->rxq_fds[rxq->queue_id]);
 		process_private->rxq_fds[rxq->queue_id] = -1;
-		rte_pktmbuf_free(rxq->pool);
+		tap_rxq_pool_free(rxq->pool);
 		rte_free(rxq->iovecs);
 		rxq->pool = NULL;
 		rxq->iovecs = NULL;
@@ -1480,7 +1497,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 
 error:
-	rte_pktmbuf_free(rxq->pool);
+	tap_rxq_pool_free(rxq->pool);
 	rxq->pool = NULL;
 	rte_free(rxq->iovecs);
 	rxq->iovecs = NULL;
@@ -2435,7 +2452,7 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
 			rxq = &internals->rxq[i];
 			close(process_private->rxq_fds[i]);
 			process_private->rxq_fds[i] = -1;
-			rte_pktmbuf_free(rxq->pool);
+			tap_rxq_pool_free(rxq->pool);
 			rte_free(rxq->iovecs);
 			rxq->pool = NULL;
 			rxq->iovecs = NULL;
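The invariant behind this fix can be shown in isolation; a minimal sketch, assuming an already-created mbuf mempool mp (chain_demo is an illustrative name, not part of the patch). Segments linked by hand through the next pointer, as the tap Rx path does for rxq->pool, leave the head's nb_segs at 1, so the sanity checks run in debug builds when the mbuf is freed reject it; the count has to be restored first, which is exactly what tap_rxq_pool_free() does for an arbitrary chain.

#include <rte_mbuf.h>

/* Hypothetical demonstration of the nb_segs invariant. */
static void
chain_demo(struct rte_mempool *mp)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return;
	}

	/* Hand linking: the chain is usable for readv()-style scatter I/O,
	 * but head->nb_segs still says 1 and pkt_len is not updated.
	 */
	head->next = tail;

	/* Restore the invariant before handing the mbuf to generic code. */
	head->nb_segs = 2;
	head->pkt_len = head->data_len + tail->data_len;

	rte_pktmbuf_free(head);	/* frees both segments */
}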