[v9,1/1] app/testpmd: support multiple mbuf pools per Rx queue

Message ID 20221017084826.1620342-1-hpothula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Headers
Series [v9,1/1] app/testpmd: support multiple mbuf pools per Rx queue |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS

Commit Message

Hanumanth Pothula Oct. 17, 2022, 8:48 a.m. UTC
  Some of the HW has support for choosing memory pools based on
the packet's size. The pool sort capability allows PMD/NIC to
choose a memory pool based on the packet's length.

On multiple mempool support enabled, populate mempool array
accordingly. Also, print pool name on which packet is received.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 app/test-pmd/testpmd.c | 40 ++++++++++++++++++++++++++++------------
 app/test-pmd/testpmd.h |  3 +++
 app/test-pmd/util.c    |  4 ++--
 3 files changed, 33 insertions(+), 14 deletions(-)
  

Comments

Singh, Aman Deep Oct. 21, 2022, 3:57 p.m. UTC | #1
On 10/17/2022 2:18 PM, Hanumanth Pothula wrote:
> Some of the HW has support for choosing memory pools based on
> the packet's size. The pool sort capability allows PMD/NIC to
> choose a memory pool based on the packet's length.
>
> On multiple mempool support enabled, populate mempool array
> accordingly. Also, print pool name on which packet is received.
>
> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> ---
>   app/test-pmd/testpmd.c | 40 ++++++++++++++++++++++++++++------------
>   app/test-pmd/testpmd.h |  3 +++
>   app/test-pmd/util.c    |  4 ++--
>   3 files changed, 33 insertions(+), 14 deletions(-)
>
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 5b0f0838dc..1549551640 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2647,10 +2647,16 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
>   {
>   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
>   	unsigned int i, mp_n;
>   	int ret;
>   
> -	if (rx_pkt_nb_segs <= 1 ||
> +	/* For multiple mempools per Rx queue support,
> +	 * rx_pkt_nb_segs greater than 1 and
> +	 * Rx offload flag, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT won't be set.
> +	 * @see rte_eth_rxconf::rx_mempools

I have a basic question about the feature, do we need rx_pkt_nb_segs > 1
for feature to work. My understanding is, if multiple mempools are defined
the driver will move pkts according to its size, even without split of pkts.
Just for my understanding, Thanks :)

> +	 */
> +	if (rx_pkt_nb_segs <= 1 &&
>   	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
>   		rx_conf->rx_seg = NULL;
>   		rx_conf->rx_nseg = 0;
> @@ -2668,20 +2674,30 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   		 */
>   		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
>   		mpx = mbuf_pool_find(socket_id, mp_n);
> -		/* Handle zero as mbuf data buffer size. */
> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> -				   rx_pkt_seg_offsets[i] : 0;
> -		rx_seg->mp = mpx ? mpx : mp;
> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> +
> +		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +			/* Handle zero as mbuf data buffer size. */
> +			rx_seg->offset = i < rx_pkt_nb_offs ?
> +					   rx_pkt_seg_offsets[i] : 0;
> +			rx_seg->mp = mpx ? mpx : mp;
> +			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> +				rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> +			} else {
> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> +						 rx_pkt_seg_lengths[i] :
> +						 mbuf_data_size[mp_n];
> +			}
>   		} else {
> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> -					rx_pkt_seg_lengths[i] :
> -					mbuf_data_size[mp_n];
> +			rx_mempool[i] = mpx ? mpx : mp;
>   		}
>   	}
> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> -	rx_conf->rx_seg = rx_useg;
> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> +		rx_conf->rx_seg = rx_useg;
> +	} else {
> +		rx_conf->rx_mempools = rx_mempool;
> +		rx_conf->rx_nmempool = rx_pkt_nb_segs;
> +	}
>   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>   				    socket_id, rx_conf, NULL);
>   	rx_conf->rx_seg = NULL;
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index e65be323b8..14be10dcef 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
>   
>   #define MIN_TOTAL_NUM_MBUFS 1024
>   
> +/* Maximum number of pools supported per Rx queue */
> +#define MAX_MEMPOOL 8

Should we set it to MAX_SEGS_BUFFER_SPLIT to avoid mismatch.

> +
>   typedef uint8_t  lcoreid_t;
>   typedef uint16_t portid_t;
>   typedef uint16_t queueid_t;
> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
> index fd98e8b51d..f9df5f69ef 100644
> --- a/app/test-pmd/util.c
> +++ b/app/test-pmd/util.c
> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
>   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>   				 print_buf, buf_size, &cur_len);
>   		MKDUMPSTR(print_buf, buf_size, cur_len,
> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> -			  eth_type, (unsigned int) mb->pkt_len,
> +			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
> +			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
>   			  (int)mb->nb_segs);
>   		ol_flags = mb->ol_flags;
>   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  
Hanumanth Pothula Oct. 24, 2022, 3:32 a.m. UTC | #2
> -----Original Message-----
> From: Singh, Aman Deep <aman.deep.singh@intel.com>
> Sent: Friday, October 21, 2022 9:28 PM
> To: Hanumanth Reddy Pothula <hpothula@marvell.com>; Yuying Zhang
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru; thomas@monjalon.net;
> Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>
> Subject: [EXT] Re: [PATCH v9 1/1] app/testpmd: support multiple mbuf pools per
> Rx queue
> 
> External Email
> 
> ----------------------------------------------------------------------
> 
> 
> On 10/17/2022 2:18 PM, Hanumanth Pothula wrote:
> > Some of the HW has support for choosing memory pools based on the
> > packet's size. The pool sort capability allows PMD/NIC to choose a
> > memory pool based on the packet's length.
> >
> > On multiple mempool support enabled, populate mempool array
> > accordingly. Also, print pool name on which packet is received.
> >
> > Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> > ---
> >   app/test-pmd/testpmd.c | 40 ++++++++++++++++++++++++++++------------
> >   app/test-pmd/testpmd.h |  3 +++
> >   app/test-pmd/util.c    |  4 ++--
> >   3 files changed, 33 insertions(+), 14 deletions(-)
> >
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > 5b0f0838dc..1549551640 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -2647,10 +2647,16 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
> >   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
> >   {
> >   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> > +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> >   	unsigned int i, mp_n;
> >   	int ret;
> >
> > -	if (rx_pkt_nb_segs <= 1 ||
> > +	/* For multiple mempools per Rx queue support,
> > +	 * rx_pkt_nb_segs greater than 1 and
> > +	 * Rx offload flag, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT won't be set.
> > +	 * @see rte_eth_rxconf::rx_mempools
> 
> I have a basic question about the feature, do we need rx_pkt_nb_segs > 1 for
> feature to work. My understanding is, if multiple mempools are defined the
> driver will move pkts according to its size, even without split of pkts.
> Just for my understanding, Thanks :)
> 
Thanks Aman for the review.

Yes, rx_pkt_nb_segs > 1 is not required for the multi-mempool feature.
rx_pkt_nb_segs points to the number of segments. Need to use mbuf_data_size_n, the total number of mbuf mempools, instead. Will take care of this and upload a new patch-set.

> > +	 */
> > +	if (rx_pkt_nb_segs <= 1 &&
> >   	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> >   		rx_conf->rx_seg = NULL;
> >   		rx_conf->rx_nseg = 0;
> > @@ -2668,20 +2674,30 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
> >   		 */
> >   		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> >   		mpx = mbuf_pool_find(socket_id, mp_n);
> > -		/* Handle zero as mbuf data buffer size. */
> > -		rx_seg->offset = i < rx_pkt_nb_offs ?
> > -				   rx_pkt_seg_offsets[i] : 0;
> > -		rx_seg->mp = mpx ? mpx : mp;
> > -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> > -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> > +
> > +		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
> {
> > +			/* Handle zero as mbuf data buffer size. */
> > +			rx_seg->offset = i < rx_pkt_nb_offs ?
> > +					   rx_pkt_seg_offsets[i] : 0;
> > +			rx_seg->mp = mpx ? mpx : mp;
> > +			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i]
> == 0) {
> > +				rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> > +			} else {
> > +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> > +						 rx_pkt_seg_lengths[i] :
> > +						 mbuf_data_size[mp_n];
> > +			}
> >   		} else {
> > -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> > -					rx_pkt_seg_lengths[i] :
> > -					mbuf_data_size[mp_n];
> > +			rx_mempool[i] = mpx ? mpx : mp;
> >   		}
> >   	}
> > -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> > -	rx_conf->rx_seg = rx_useg;
> > +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> > +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> > +		rx_conf->rx_seg = rx_useg;
> > +	} else {
> > +		rx_conf->rx_mempools = rx_mempool;
> > +		rx_conf->rx_nmempool = rx_pkt_nb_segs;
> > +	}
> >   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
> >   				    socket_id, rx_conf, NULL);
> >   	rx_conf->rx_seg = NULL;
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > e65be323b8..14be10dcef 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> >
> >   #define MIN_TOTAL_NUM_MBUFS 1024
> >
> > +/* Maximum number of pools supported per Rx queue */ #define
> > +MAX_MEMPOOL 8
> 
> Should we set it to MAX_SEGS_BUFFER_SPLIT to avoid mismatch.
> 
> > +
> >   typedef uint8_t  lcoreid_t;
> >   typedef uint16_t portid_t;
> >   typedef uint16_t queueid_t;
> > diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> > fd98e8b51d..f9df5f69ef 100644
> > --- a/app/test-pmd/util.c
> > +++ b/app/test-pmd/util.c
> > @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
> struct rte_mbuf *pkts[],
> >   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
> >   				 print_buf, buf_size, &cur_len);
> >   		MKDUMPSTR(print_buf, buf_size, cur_len,
> > -			  " - type=0x%04x - length=%u - nb_segs=%d",
> > -			  eth_type, (unsigned int) mb->pkt_len,
> > +			  " - pool=%s - type=0x%04x - length=%u -
> nb_segs=%d",
> > +			  mb->pool->name, eth_type, (unsigned int) mb-
> >pkt_len,
> >   			  (int)mb->nb_segs);
> >   		ol_flags = mb->ol_flags;
> >   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  

Patch

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 5b0f0838dc..1549551640 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2647,10 +2647,16 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
 {
 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
 	unsigned int i, mp_n;
 	int ret;
 
-	if (rx_pkt_nb_segs <= 1 ||
+	/* For multiple mempools per Rx queue support,
+	 * rx_pkt_nb_segs greater than 1 and
+	 * Rx offload flag, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT won't be set.
+	 * @see rte_eth_rxconf::rx_mempools
+	 */
+	if (rx_pkt_nb_segs <= 1 &&
 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
 		rx_conf->rx_seg = NULL;
 		rx_conf->rx_nseg = 0;
@@ -2668,20 +2674,30 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		 */
 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
 		mpx = mbuf_pool_find(socket_id, mp_n);
-		/* Handle zero as mbuf data buffer size. */
-		rx_seg->offset = i < rx_pkt_nb_offs ?
-				   rx_pkt_seg_offsets[i] : 0;
-		rx_seg->mp = mpx ? mpx : mp;
-		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
-			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
+
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			/* Handle zero as mbuf data buffer size. */
+			rx_seg->offset = i < rx_pkt_nb_offs ?
+					   rx_pkt_seg_offsets[i] : 0;
+			rx_seg->mp = mpx ? mpx : mp;
+			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
+				rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
+			} else {
+				rx_seg->length = rx_pkt_seg_lengths[i] ?
+						 rx_pkt_seg_lengths[i] :
+						 mbuf_data_size[mp_n];
+			}
 		} else {
-			rx_seg->length = rx_pkt_seg_lengths[i] ?
-					rx_pkt_seg_lengths[i] :
-					mbuf_data_size[mp_n];
+			rx_mempool[i] = mpx ? mpx : mp;
 		}
 	}
-	rx_conf->rx_nseg = rx_pkt_nb_segs;
-	rx_conf->rx_seg = rx_useg;
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		rx_conf->rx_nseg = rx_pkt_nb_segs;
+		rx_conf->rx_seg = rx_useg;
+	} else {
+		rx_conf->rx_mempools = rx_mempool;
+		rx_conf->rx_nmempool = rx_pkt_nb_segs;
+	}
 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
 				    socket_id, rx_conf, NULL);
 	rx_conf->rx_seg = NULL;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e65be323b8..14be10dcef 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -80,6 +80,9 @@  extern uint8_t cl_quit;
 
 #define MIN_TOTAL_NUM_MBUFS 1024
 
+/* Maximum number of pools supported per Rx queue */
+#define MAX_MEMPOOL 8
+
 typedef uint8_t  lcoreid_t;
 typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
index fd98e8b51d..f9df5f69ef 100644
--- a/app/test-pmd/util.c
+++ b/app/test-pmd/util.c
@@ -150,8 +150,8 @@  dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
 				 print_buf, buf_size, &cur_len);
 		MKDUMPSTR(print_buf, buf_size, cur_len,
-			  " - type=0x%04x - length=%u - nb_segs=%d",
-			  eth_type, (unsigned int) mb->pkt_len,
+			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
+			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
 			  (int)mb->nb_segs);
 		ol_flags = mb->ol_flags;
 		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {