[v11,1/1] app/testpmd: support multiple mbuf pools per Rx queue

Message ID 20221025014005.2054913-1-hpothula@marvell.com (mailing list archive)
State Changes Requested, archived
Delegated to: Andrew Rybchenko
Headers
Series [v11,1/1] app/testpmd: support multiple mbuf pools per Rx queue |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK
ci/github-robot: build success github build: passed
ci/intel-Testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS

Commit Message

Hanumanth Pothula Oct. 25, 2022, 1:40 a.m. UTC
Some HW has support for choosing memory pools based on
the packet's size. The pool sort capability allows the PMD/NIC to
choose a memory pool based on the packet's length.

When multiple mempool support is enabled, populate the mempool array
accordingly. Also, print the name of the pool on which each packet is received.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
v11:
 - Resolve compilation errors and warnings.
v10:
 - Populate multi-mempool array based on mbuf_data_size_n instead
   of rx_pkt_nb_segs.
---
 app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
 app/test-pmd/testpmd.h |  3 ++
 app/test-pmd/util.c    |  4 +--
 3 files changed, 45 insertions(+), 25 deletions(-)
  

Comments

Hanumanth Pothula Nov. 1, 2022, 2:13 p.m. UTC | #1
Ping

> -----Original Message-----
> From: Hanumanth Pothula <hpothula@marvell.com>
> Sent: Tuesday, October 25, 2022 7:10 AM
> To: Aman Singh <aman.deep.singh@intel.com>; Yuying Zhang
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru; thomas@monjalon.net;
> Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; Hanumanth Reddy Pothula
> <hpothula@marvell.com>
> Subject: [PATCH v11 1/1] app/testpmd: support multiple mbuf pools per Rx
> queue
> 
> Some of the HW has support for choosing memory pools based on the packet's
> size. The pool sort capability allows PMD/NIC to choose a memory pool based
> on the packet's length.
> 
> On multiple mempool support enabled, populate mempool array accordingly.
> Also, print pool name on which packet is received.
> 
> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> v11:
>  - Resolve compilation and warning.
> v10:
>  - Populate multi-mempool array based on mbuf_data_size_n instead
>    of rx_pkt_nb_segs.
> ---
>  app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
>  app/test-pmd/testpmd.h |  3 ++
>  app/test-pmd/util.c    |  4 +--
>  3 files changed, 45 insertions(+), 25 deletions(-)
> 
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> 5b0f0838dc..62f7c9dba8 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
>  	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)  {
>  	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> +	struct rte_mempool *mpx;
>  	unsigned int i, mp_n;
>  	int ret;
> 
> -	if (rx_pkt_nb_segs <= 1 ||
> -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> +	/* Verify Rx queue configuration is single pool and segment or
> +	 * multiple pool/segment.
> +	 * @see rte_eth_rxconf::rx_mempools
> +	 * @see rte_eth_rxconf::rx_seg
> +	 */
> +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
> +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
>  		rx_conf->rx_seg = NULL;
>  		rx_conf->rx_nseg = 0;
>  		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -
> 2659,29 +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  					     rx_conf, mp);
>  		goto exit;
>  	}
> -	for (i = 0; i < rx_pkt_nb_segs; i++) {
> -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> -		struct rte_mempool *mpx;
> -		/*
> -		 * Use last valid pool for the segments with number
> -		 * exceeding the pool index.
> -		 */
> -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> -		mpx = mbuf_pool_find(socket_id, mp_n);
> -		/* Handle zero as mbuf data buffer size. */
> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> -				   rx_pkt_seg_offsets[i] : 0;
> -		rx_seg->mp = mpx ? mpx : mp;
> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> -		} else {
> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> -					rx_pkt_seg_lengths[i] :
> -					mbuf_data_size[mp_n];
> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +		for (i = 0; i < rx_pkt_nb_segs; i++) {
> +			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> +			/*
> +			 * Use last valid pool for the segments with number
> +			 * exceeding the pool index.
> +			 */
> +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 :
> i;
> +			mpx = mbuf_pool_find(socket_id, mp_n);
> +			if (rx_conf->offloads &
> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +				/**
> +				 * On Segment length zero, update length as,
> +				 *      buffer size - headroom size
> +				 * to make sure enough space is accomidate for
> header.
> +				 */
> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> +						 rx_pkt_seg_lengths[i] :
> +						 mbuf_data_size[mp_n] -
> RTE_PKTMBUF_HEADROOM;
> +				rx_seg->offset = i < rx_pkt_nb_offs ?
> +						 rx_pkt_seg_offsets[i] : 0;
> +				rx_seg->mp = mpx ? mpx : mp;
> +			}
> +		}
> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> +		rx_conf->rx_seg = rx_useg;
> +	} else {
> +		for (i = 0; i < mbuf_data_size_n; i++) {
> +			mpx = mbuf_pool_find(socket_id, i);
> +			rx_mempool[i] = mpx ? mpx : mp;
>  		}
> +		rx_conf->rx_mempools = rx_mempool;
> +		rx_conf->rx_nmempool = mbuf_data_size_n;
>  	}
> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> -	rx_conf->rx_seg = rx_useg;
>  	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>  				    socket_id, rx_conf, NULL);
>  	rx_conf->rx_seg = NULL;
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> e65be323b8..14be10dcef 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> 
>  #define MIN_TOTAL_NUM_MBUFS 1024
> 
> +/* Maximum number of pools supported per Rx queue */ #define
> +MAX_MEMPOOL 8
> +
>  typedef uint8_t  lcoreid_t;
>  typedef uint16_t portid_t;
>  typedef uint16_t queueid_t;
> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> fd98e8b51d..f9df5f69ef 100644
> --- a/app/test-pmd/util.c
> +++ b/app/test-pmd/util.c
> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
> struct rte_mbuf *pkts[],
>  		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>  				 print_buf, buf_size, &cur_len);
>  		MKDUMPSTR(print_buf, buf_size, cur_len,
> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> -			  eth_type, (unsigned int) mb->pkt_len,
> +			  " - pool=%s - type=0x%04x - length=%u -
> nb_segs=%d",
> +			  mb->pool->name, eth_type, (unsigned int) mb-
> >pkt_len,
>  			  (int)mb->nb_segs);
>  		ol_flags = mb->ol_flags;
>  		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
> --
> 2.25.1
  
Singh, Aman Deep Nov. 3, 2022, 12:15 p.m. UTC | #2
On 10/25/2022 7:10 AM, Hanumanth Pothula wrote:
> Some of the HW has support for choosing memory pools based on
> the packet's size. The pool sort capability allows PMD/NIC to
> choose a memory pool based on the packet's length.
>
> On multiple mempool support enabled, populate mempool array
> accordingly. Also, print pool name on which packet is received.
>
> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> v11:
>   - Resolve compilation and warning.
> v10:
>   - Populate multi-mempool array based on mbuf_data_size_n instead
>     of rx_pkt_nb_segs.
> ---
>   app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
>   app/test-pmd/testpmd.h |  3 ++
>   app/test-pmd/util.c    |  4 +--
>   3 files changed, 45 insertions(+), 25 deletions(-)
>
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 5b0f0838dc..62f7c9dba8 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
>   {
>   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> +	struct rte_mempool *mpx;
>   	unsigned int i, mp_n;
>   	int ret;
>   
> -	if (rx_pkt_nb_segs <= 1 ||
> -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> +	/* Verify Rx queue configuration is single pool and segment or
> +	 * multiple pool/segment.
> +	 * @see rte_eth_rxconf::rx_mempools
> +	 * @see rte_eth_rxconf::rx_seg
> +	 */
> +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
> +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
>   		rx_conf->rx_seg = NULL;
>   		rx_conf->rx_nseg = 0;
>   		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
> @@ -2659,29 +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>   					     rx_conf, mp);
>   		goto exit;
>   	}
> -	for (i = 0; i < rx_pkt_nb_segs; i++) {
> -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> -		struct rte_mempool *mpx;
> -		/*
> -		 * Use last valid pool for the segments with number
> -		 * exceeding the pool index.
> -		 */
> -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> -		mpx = mbuf_pool_find(socket_id, mp_n);
> -		/* Handle zero as mbuf data buffer size. */
> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> -				   rx_pkt_seg_offsets[i] : 0;
> -		rx_seg->mp = mpx ? mpx : mp;
> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> -		} else {
> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> -					rx_pkt_seg_lengths[i] :
> -					mbuf_data_size[mp_n];
> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {

In case this flag *_OFFLOAD_BUFFER_SPLIT is not set, but rx_pkt_nb_segs > 1
Will it still enter below loop, as before.

> +		for (i = 0; i < rx_pkt_nb_segs; i++) {
> +			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> +			/*
> +			 * Use last valid pool for the segments with number
> +			 * exceeding the pool index.
> +			 */
> +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> +			mpx = mbuf_pool_find(socket_id, mp_n);
> +			if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {

Isn't above check already found to be TRUE, before we reached here.

> +				/**
> +				 * On Segment length zero, update length as,
> +				 *      buffer size - headroom size
> +				 * to make sure enough space is accomidate for header.
> +				 */
> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> +						 rx_pkt_seg_lengths[i] :
> +						 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
> +				rx_seg->offset = i < rx_pkt_nb_offs ?
> +						 rx_pkt_seg_offsets[i] : 0;
> +				rx_seg->mp = mpx ? mpx : mp;
> +			}
> +		}
> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> +		rx_conf->rx_seg = rx_useg;
> +	} else {
> +		for (i = 0; i < mbuf_data_size_n; i++) {
> +			mpx = mbuf_pool_find(socket_id, i);
> +			rx_mempool[i] = mpx ? mpx : mp;
>   		}
> +		rx_conf->rx_mempools = rx_mempool;
> +		rx_conf->rx_nmempool = mbuf_data_size_n;
>   	}
> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> -	rx_conf->rx_seg = rx_useg;
>   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>   				    socket_id, rx_conf, NULL);
>   	rx_conf->rx_seg = NULL;
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index e65be323b8..14be10dcef 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
>   
>   #define MIN_TOTAL_NUM_MBUFS 1024
>   
> +/* Maximum number of pools supported per Rx queue */
> +#define MAX_MEMPOOL 8
> +
>   typedef uint8_t  lcoreid_t;
>   typedef uint16_t portid_t;
>   typedef uint16_t queueid_t;
> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
> index fd98e8b51d..f9df5f69ef 100644
> --- a/app/test-pmd/util.c
> +++ b/app/test-pmd/util.c
> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
>   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>   				 print_buf, buf_size, &cur_len);
>   		MKDUMPSTR(print_buf, buf_size, cur_len,
> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> -			  eth_type, (unsigned int) mb->pkt_len,
> +			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
> +			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
>   			  (int)mb->nb_segs);
>   		ol_flags = mb->ol_flags;
>   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  
Hanumanth Pothula Nov. 3, 2022, 12:36 p.m. UTC | #3
> -----Original Message-----
> From: Singh, Aman Deep <aman.deep.singh@intel.com>
> Sent: Thursday, November 3, 2022 5:46 PM
> To: Hanumanth Reddy Pothula <hpothula@marvell.com>; Yuying Zhang
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru; thomas@monjalon.net;
> Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>
> Subject: [EXT] Re: [PATCH v11 1/1] app/testpmd: support multiple mbuf pools
> per Rx queue
> 
> External Email
> 
> ----------------------------------------------------------------------
> 
> 
> On 10/25/2022 7:10 AM, Hanumanth Pothula wrote:
> > Some of the HW has support for choosing memory pools based on the
> > packet's size. The pool sort capability allows PMD/NIC to choose a
> > memory pool based on the packet's length.
> >
> > On multiple mempool support enabled, populate mempool array
> > accordingly. Also, print pool name on which packet is received.
> >
> > Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> > v11:
> >   - Resolve compilation and warning.
> > v10:
> >   - Populate multi-mempool array based on mbuf_data_size_n instead
> >     of rx_pkt_nb_segs.
> > ---
> >   app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
> >   app/test-pmd/testpmd.h |  3 ++
> >   app/test-pmd/util.c    |  4 +--
> >   3 files changed, 45 insertions(+), 25 deletions(-)
> >
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > 5b0f0838dc..62f7c9dba8 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
> >   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
> >   {
> >   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> > +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> > +	struct rte_mempool *mpx;
> >   	unsigned int i, mp_n;
> >   	int ret;
> >
> > -	if (rx_pkt_nb_segs <= 1 ||
> > -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> > +	/* Verify Rx queue configuration is single pool and segment or
> > +	 * multiple pool/segment.
> > +	 * @see rte_eth_rxconf::rx_mempools
> > +	 * @see rte_eth_rxconf::rx_seg
> > +	 */
> > +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
> > +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
> >   		rx_conf->rx_seg = NULL;
> >   		rx_conf->rx_nseg = 0;
> >   		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -
> 2659,29
> > +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
> >   					     rx_conf, mp);
> >   		goto exit;
> >   	}
> > -	for (i = 0; i < rx_pkt_nb_segs; i++) {
> > -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> > -		struct rte_mempool *mpx;
> > -		/*
> > -		 * Use last valid pool for the segments with number
> > -		 * exceeding the pool index.
> > -		 */
> > -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> > -		mpx = mbuf_pool_find(socket_id, mp_n);
> > -		/* Handle zero as mbuf data buffer size. */
> > -		rx_seg->offset = i < rx_pkt_nb_offs ?
> > -				   rx_pkt_seg_offsets[i] : 0;
> > -		rx_seg->mp = mpx ? mpx : mp;
> > -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> > -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> > -		} else {
> > -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> > -					rx_pkt_seg_lengths[i] :
> > -					mbuf_data_size[mp_n];
> > +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> 
> In case this flag *_OFFLOAD_BUFFER_SPLIT is not set, but rx_pkt_nb_segs > 1
> Will it still enter below loop, as before.

Yes Aman, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag to be set to proceed further.
Do you suggest to enter the loop on  rx_pkt_nb_segs > 1 irrespective of RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag. 
Something like, 
if (rx_pkt_nb_segs > 1) {
	for(i = 0; i < rx_pkt_nb_segs; i++){
	}
}

> 
> > +		for (i = 0; i < rx_pkt_nb_segs; i++) {
> > +			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> > +			/*
> > +			 * Use last valid pool for the segments with number
> > +			 * exceeding the pool index.
> > +			 */
> > +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 :
> i;
> > +			mpx = mbuf_pool_find(socket_id, mp_n);
> > +			if (rx_conf->offloads &
> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> 
> Isn't above check already found to be TRUE, before we reached here.
Yes this is redundant, will remove.
> 
> > +				/**
> > +				 * On Segment length zero, update length as,
> > +				 *      buffer size - headroom size
> > +				 * to make sure enough space is accomidate for
> header.
> > +				 */
> > +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> > +						 rx_pkt_seg_lengths[i] :
> > +						 mbuf_data_size[mp_n] -
> RTE_PKTMBUF_HEADROOM;
> > +				rx_seg->offset = i < rx_pkt_nb_offs ?
> > +						 rx_pkt_seg_offsets[i] : 0;
> > +				rx_seg->mp = mpx ? mpx : mp;
> > +			}
> > +		}
> > +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> > +		rx_conf->rx_seg = rx_useg;
> > +	} else {
> > +		for (i = 0; i < mbuf_data_size_n; i++) {
> > +			mpx = mbuf_pool_find(socket_id, i);
> > +			rx_mempool[i] = mpx ? mpx : mp;
> >   		}
> > +		rx_conf->rx_mempools = rx_mempool;
> > +		rx_conf->rx_nmempool = mbuf_data_size_n;
> >   	}
> > -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> > -	rx_conf->rx_seg = rx_useg;
> >   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
> >   				    socket_id, rx_conf, NULL);
> >   	rx_conf->rx_seg = NULL;
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > e65be323b8..14be10dcef 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> >
> >   #define MIN_TOTAL_NUM_MBUFS 1024
> >
> > +/* Maximum number of pools supported per Rx queue */ #define
> > +MAX_MEMPOOL 8
> > +
> >   typedef uint8_t  lcoreid_t;
> >   typedef uint16_t portid_t;
> >   typedef uint16_t queueid_t;
> > diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> > fd98e8b51d..f9df5f69ef 100644
> > --- a/app/test-pmd/util.c
> > +++ b/app/test-pmd/util.c
> > @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
> struct rte_mbuf *pkts[],
> >   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
> >   				 print_buf, buf_size, &cur_len);
> >   		MKDUMPSTR(print_buf, buf_size, cur_len,
> > -			  " - type=0x%04x - length=%u - nb_segs=%d",
> > -			  eth_type, (unsigned int) mb->pkt_len,
> > +			  " - pool=%s - type=0x%04x - length=%u -
> nb_segs=%d",
> > +			  mb->pool->name, eth_type, (unsigned int) mb-
> >pkt_len,
> >   			  (int)mb->nb_segs);
> >   		ol_flags = mb->ol_flags;
> >   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  
Singh, Aman Deep Nov. 3, 2022, 3:20 p.m. UTC | #4
On 11/3/2022 6:06 PM, Hanumanth Reddy Pothula wrote:
>
>> -----Original Message-----
>> From: Singh, Aman Deep <aman.deep.singh@intel.com>
>> Sent: Thursday, November 3, 2022 5:46 PM
>> To: Hanumanth Reddy Pothula <hpothula@marvell.com>; Yuying Zhang
>> <yuying.zhang@intel.com>
>> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru; thomas@monjalon.net;
>> Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Nithin Kumar Dabilpuram
>> <ndabilpuram@marvell.com>
>> Subject: [EXT] Re: [PATCH v11 1/1] app/testpmd: support multiple mbuf pools
>> per Rx queue
>>
>> External Email
>>
>> ----------------------------------------------------------------------
>>
>>
>> On 10/25/2022 7:10 AM, Hanumanth Pothula wrote:
>>> Some of the HW has support for choosing memory pools based on the
>>> packet's size. The pool sort capability allows PMD/NIC to choose a
>>> memory pool based on the packet's length.
>>>
>>> On multiple mempool support enabled, populate mempool array
>>> accordingly. Also, print pool name on which packet is received.
>>>
>>> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
>>> v11:
>>>    - Resolve compilation and warning.
>>> v10:
>>>    - Populate multi-mempool array based on mbuf_data_size_n instead
>>>      of rx_pkt_nb_segs.
>>> ---
>>>    app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
>>>    app/test-pmd/testpmd.h |  3 ++
>>>    app/test-pmd/util.c    |  4 +--
>>>    3 files changed, 45 insertions(+), 25 deletions(-)
>>>
>>> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
>>> 5b0f0838dc..62f7c9dba8 100644
>>> --- a/app/test-pmd/testpmd.c
>>> +++ b/app/test-pmd/testpmd.c
>>> @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t
>> rx_queue_id,
>>>    	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
>>>    {
>>>    	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
>>> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
>>> +	struct rte_mempool *mpx;
>>>    	unsigned int i, mp_n;
>>>    	int ret;
>>>
>>> -	if (rx_pkt_nb_segs <= 1 ||
>>> -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
>>> +	/* Verify Rx queue configuration is single pool and segment or
>>> +	 * multiple pool/segment.
>>> +	 * @see rte_eth_rxconf::rx_mempools
>>> +	 * @see rte_eth_rxconf::rx_seg
>>> +	 */
>>> +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
>>> +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
>>>    		rx_conf->rx_seg = NULL;
>>>    		rx_conf->rx_nseg = 0;
>>>    		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -
>> 2659,29
>>> +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>>>    					     rx_conf, mp);
>>>    		goto exit;
>>>    	}
>>> -	for (i = 0; i < rx_pkt_nb_segs; i++) {
>>> -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
>>> -		struct rte_mempool *mpx;
>>> -		/*
>>> -		 * Use last valid pool for the segments with number
>>> -		 * exceeding the pool index.
>>> -		 */
>>> -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
>>> -		mpx = mbuf_pool_find(socket_id, mp_n);
>>> -		/* Handle zero as mbuf data buffer size. */
>>> -		rx_seg->offset = i < rx_pkt_nb_offs ?
>>> -				   rx_pkt_seg_offsets[i] : 0;
>>> -		rx_seg->mp = mpx ? mpx : mp;
>>> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
>>> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
>>> -		} else {
>>> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
>>> -					rx_pkt_seg_lengths[i] :
>>> -					mbuf_data_size[mp_n];
>>> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
>> In case this flag *_OFFLOAD_BUFFER_SPLIT is not set, but rx_pkt_nb_segs > 1
>> Will it still enter below loop, as before.
> Yes Aman, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag to be set to proceed further.
> Do you suggest to enter the loop on  rx_pkt_nb_segs > 1 irrespective of RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag.
> Something like,
> if (rx_pkt_nb_segs > 1) {
> 	for(i = 0; i < rx_pkt_nb_segs; i++){
> 	}
> }

As per the old logic, either of the case was supported-
if (rx_pkt_nb_segs <= 1 ||
(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0)

>
>>> +		for (i = 0; i < rx_pkt_nb_segs; i++) {
>>> +			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
>>> +			/*
>>> +			 * Use last valid pool for the segments with number
>>> +			 * exceeding the pool index.
>>> +			 */
>>> +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 :
>> i;
>>> +			mpx = mbuf_pool_find(socket_id, mp_n);
>>> +			if (rx_conf->offloads &
>> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
>>
>> Isn't above check already found to be TRUE, before we reached here.
> Yes this is redundant, will remove.
>>> +				/**
>>> +				 * On Segment length zero, update length as,
>>> +				 *      buffer size - headroom size
>>> +				 * to make sure enough space is accomidate for
>> header.
>>> +				 */
>>> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
>>> +						 rx_pkt_seg_lengths[i] :
>>> +						 mbuf_data_size[mp_n] -
>> RTE_PKTMBUF_HEADROOM;
>>> +				rx_seg->offset = i < rx_pkt_nb_offs ?
>>> +						 rx_pkt_seg_offsets[i] : 0;
>>> +				rx_seg->mp = mpx ? mpx : mp;
>>> +			}
>>> +		}
>>> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
>>> +		rx_conf->rx_seg = rx_useg;
>>> +	} else {
>>> +		for (i = 0; i < mbuf_data_size_n; i++) {
>>> +			mpx = mbuf_pool_find(socket_id, i);
>>> +			rx_mempool[i] = mpx ? mpx : mp;
>>>    		}
>>> +		rx_conf->rx_mempools = rx_mempool;
>>> +		rx_conf->rx_nmempool = mbuf_data_size_n;
>>>    	}
>>> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
>>> -	rx_conf->rx_seg = rx_useg;
>>>    	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>>>    				    socket_id, rx_conf, NULL);
>>>    	rx_conf->rx_seg = NULL;
>>> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
>>> e65be323b8..14be10dcef 100644
>>> --- a/app/test-pmd/testpmd.h
>>> +++ b/app/test-pmd/testpmd.h
>>> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
>>>
>>>    #define MIN_TOTAL_NUM_MBUFS 1024
>>>
>>> +/* Maximum number of pools supported per Rx queue */ #define
>>> +MAX_MEMPOOL 8
>>> +
>>>    typedef uint8_t  lcoreid_t;
>>>    typedef uint16_t portid_t;
>>>    typedef uint16_t queueid_t;
>>> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
>>> fd98e8b51d..f9df5f69ef 100644
>>> --- a/app/test-pmd/util.c
>>> +++ b/app/test-pmd/util.c
>>> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
>> struct rte_mbuf *pkts[],
>>>    		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>>>    				 print_buf, buf_size, &cur_len);
>>>    		MKDUMPSTR(print_buf, buf_size, cur_len,
>>> -			  " - type=0x%04x - length=%u - nb_segs=%d",
>>> -			  eth_type, (unsigned int) mb->pkt_len,
>>> +			  " - pool=%s - type=0x%04x - length=%u -
>> nb_segs=%d",
>>> +			  mb->pool->name, eth_type, (unsigned int) mb-
>>> pkt_len,
>>>    			  (int)mb->nb_segs);
>>>    		ol_flags = mb->ol_flags;
>>>    		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  
Hanumanth Pothula Nov. 4, 2022, 3:38 p.m. UTC | #5
> -----Original Message-----
> From: Singh, Aman Deep <aman.deep.singh@intel.com>
> Sent: Thursday, November 3, 2022 8:50 PM
> To: Hanumanth Reddy Pothula <hpothula@marvell.com>; Yuying Zhang
> <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru;
> thomas@monjalon.net; Jerin Jacob Kollanukkaran <jerinj@marvell.com>;
> Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>
> Subject: Re: [EXT] Re: [PATCH v11 1/1] app/testpmd: support multiple
> mbuf pools per Rx queue
> 
> 
> 
> On 11/3/2022 6:06 PM, Hanumanth Reddy Pothula wrote:
> >
> >> -----Original Message-----
> >> From: Singh, Aman Deep <aman.deep.singh@intel.com>
> >> Sent: Thursday, November 3, 2022 5:46 PM
> >> To: Hanumanth Reddy Pothula <hpothula@marvell.com>; Yuying Zhang
> >> <yuying.zhang@intel.com>
> >> Cc: dev@dpdk.org; andrew.rybchenko@oktetlabs.ru;
> thomas@monjalon.net;
> >> Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Nithin Kumar
> >> Dabilpuram <ndabilpuram@marvell.com>
> >> Subject: [EXT] Re: [PATCH v11 1/1] app/testpmd: support multiple mbuf
> >> pools per Rx queue
> >>
> >> External Email
> >>
> >> ---------------------------------------------------------------------
> >> -
> >>
> >>
> >> On 10/25/2022 7:10 AM, Hanumanth Pothula wrote:
> >>> Some of the HW has support for choosing memory pools based on the
> >>> packet's size. The pool sort capability allows PMD/NIC to choose a
> >>> memory pool based on the packet's length.
> >>>
> >>> On multiple mempool support enabled, populate mempool array
> >>> accordingly. Also, print pool name on which packet is received.
> >>>
> >>> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> >>> v11:
> >>>    - Resolve compilation and warning.
> >>> v10:
> >>>    - Populate multi-mempool array based on mbuf_data_size_n instead
> >>>      of rx_pkt_nb_segs.
> >>> ---
> >>>    app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++------
> ---------
> >>>    app/test-pmd/testpmd.h |  3 ++
> >>>    app/test-pmd/util.c    |  4 +--
> >>>    3 files changed, 45 insertions(+), 25 deletions(-)
> >>>
> >>> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> >>> 5b0f0838dc..62f7c9dba8 100644
> >>> --- a/app/test-pmd/testpmd.c
> >>> +++ b/app/test-pmd/testpmd.c
> >>> @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id,
> uint16_t
> >> rx_queue_id,
> >>>    	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
> >>>    {
> >>>    	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> >>> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> >>> +	struct rte_mempool *mpx;
> >>>    	unsigned int i, mp_n;
> >>>    	int ret;
> >>>
> >>> -	if (rx_pkt_nb_segs <= 1 ||
> >>> -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) ==
> 0) {
> >>> +	/* Verify Rx queue configuration is single pool and segment or
> >>> +	 * multiple pool/segment.
> >>> +	 * @see rte_eth_rxconf::rx_mempools
> >>> +	 * @see rte_eth_rxconf::rx_seg
> >>> +	 */
> >>> +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
> >>> +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) !=
> 0)))
> >>> +{
> >>>    		rx_conf->rx_seg = NULL;
> >>>    		rx_conf->rx_nseg = 0;
> >>>    		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -
> >> 2659,29
> >>> +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
> >>>    					     rx_conf, mp);
> >>>    		goto exit;
> >>>    	}
> >>> -	for (i = 0; i < rx_pkt_nb_segs; i++) {
> >>> -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> >>> -		struct rte_mempool *mpx;
> >>> -		/*
> >>> -		 * Use last valid pool for the segments with number
> >>> -		 * exceeding the pool index.
> >>> -		 */
> >>> -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> >>> -		mpx = mbuf_pool_find(socket_id, mp_n);
> >>> -		/* Handle zero as mbuf data buffer size. */
> >>> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> >>> -				   rx_pkt_seg_offsets[i] : 0;
> >>> -		rx_seg->mp = mpx ? mpx : mp;
> >>> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0)
> {
> >>> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> >>> -		} else {
> >>> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> >>> -					rx_pkt_seg_lengths[i] :
> >>> -					mbuf_data_size[mp_n];
> >>> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> >> In case this flag *_OFFLOAD_BUFFER_SPLIT is not set, but
> >> rx_pkt_nb_segs > 1 Will it still enter below loop, as before.
> > Yes Aman, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag to be set to
> proceed further.
> > Do you suggest to enter the loop on  rx_pkt_nb_segs > 1 irrespective of
> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag.
> > Something like,
> > if (rx_pkt_nb_segs > 1) {
> > 	for(i = 0; i < rx_pkt_nb_segs; i++){
> > 	}
> > }
> 
> As per the old logic, either of the case was supported- if (rx_pkt_nb_segs <=
> 1 || (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0)
> 


Yes, will update accordingly.

> >
> >>> +		for (i = 0; i < rx_pkt_nb_segs; i++) {
> >>> +			struct rte_eth_rxseg_split *rx_seg =
> &rx_useg[i].split;
> >>> +			/*
> >>> +			 * Use last valid pool for the segments with number
> >>> +			 * exceeding the pool index.
> >>> +			 */
> >>> +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n
> - 1 :
> >> i;
> >>> +			mpx = mbuf_pool_find(socket_id, mp_n);
> >>> +			if (rx_conf->offloads &
> >> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> >>
> >> Isn't above check already found to be TRUE, before we reached here.
> > Yes this is redundant, will remove.
> >>> +				/**
> >>> +				 * On Segment length zero, update length as,
> >>> +				 *      buffer size - headroom size
> >>> +				 * to make sure enough space is accomidate
> for
> >> header.
> >>> +				 */
> >>> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> >>> +						 rx_pkt_seg_lengths[i] :
> >>> +						 mbuf_data_size[mp_n] -
> >> RTE_PKTMBUF_HEADROOM;
> >>> +				rx_seg->offset = i < rx_pkt_nb_offs ?
> >>> +						 rx_pkt_seg_offsets[i] : 0;
> >>> +				rx_seg->mp = mpx ? mpx : mp;
> >>> +			}
> >>> +		}
> >>> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> >>> +		rx_conf->rx_seg = rx_useg;
> >>> +	} else {
> >>> +		for (i = 0; i < mbuf_data_size_n; i++) {
> >>> +			mpx = mbuf_pool_find(socket_id, i);
> >>> +			rx_mempool[i] = mpx ? mpx : mp;
> >>>    		}
> >>> +		rx_conf->rx_mempools = rx_mempool;
> >>> +		rx_conf->rx_nmempool = mbuf_data_size_n;
> >>>    	}
> >>> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> >>> -	rx_conf->rx_seg = rx_useg;
> >>>    	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
> >>>    				    socket_id, rx_conf, NULL);
> >>>    	rx_conf->rx_seg = NULL;
> >>> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> >>> e65be323b8..14be10dcef 100644
> >>> --- a/app/test-pmd/testpmd.h
> >>> +++ b/app/test-pmd/testpmd.h
> >>> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> >>>
> >>>    #define MIN_TOTAL_NUM_MBUFS 1024
> >>>
> >>> +/* Maximum number of pools supported per Rx queue */ #define
> >>> +MAX_MEMPOOL 8
> >>> +
> >>>    typedef uint8_t  lcoreid_t;
> >>>    typedef uint16_t portid_t;
> >>>    typedef uint16_t queueid_t;
> >>> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> >>> fd98e8b51d..f9df5f69ef 100644
> >>> --- a/app/test-pmd/util.c
> >>> +++ b/app/test-pmd/util.c
> >>> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t
> queue,
> >> struct rte_mbuf *pkts[],
> >>>    		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
> >>>    				 print_buf, buf_size, &cur_len);
> >>>    		MKDUMPSTR(print_buf, buf_size, cur_len,
> >>> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> >>> -			  eth_type, (unsigned int) mb->pkt_len,
> >>> +			  " - pool=%s - type=0x%04x - length=%u -
> >> nb_segs=%d",
> >>> +			  mb->pool->name, eth_type, (unsigned int) mb-
> >>> pkt_len,
> >>>    			  (int)mb->nb_segs);
> >>>    		ol_flags = mb->ol_flags;
> >>>    		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
  

Patch

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 5b0f0838dc..62f7c9dba8 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2647,11 +2647,18 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
 {
 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
+	struct rte_mempool *mpx;
 	unsigned int i, mp_n;
 	int ret;
 
-	if (rx_pkt_nb_segs <= 1 ||
-	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+	/* Check whether the Rx queue is configured with a single pool and
+	 * segment, or with multiple pools/segments.
+	 * @see rte_eth_rxconf::rx_mempools
+	 * @see rte_eth_rxconf::rx_seg
+	 */
+	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
+	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
 		rx_conf->rx_seg = NULL;
 		rx_conf->rx_nseg = 0;
 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
@@ -2659,29 +2666,39 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 					     rx_conf, mp);
 		goto exit;
 	}
-	for (i = 0; i < rx_pkt_nb_segs; i++) {
-		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
-		struct rte_mempool *mpx;
-		/*
-		 * Use last valid pool for the segments with number
-		 * exceeding the pool index.
-		 */
-		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
-		mpx = mbuf_pool_find(socket_id, mp_n);
-		/* Handle zero as mbuf data buffer size. */
-		rx_seg->offset = i < rx_pkt_nb_offs ?
-				   rx_pkt_seg_offsets[i] : 0;
-		rx_seg->mp = mpx ? mpx : mp;
-		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
-			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
-		} else {
-			rx_seg->length = rx_pkt_seg_lengths[i] ?
-					rx_pkt_seg_lengths[i] :
-					mbuf_data_size[mp_n];
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		for (i = 0; i < rx_pkt_nb_segs; i++) {
+			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+			/*
+			 * Use last valid pool for the segments with number
+			 * exceeding the pool index.
+			 */
+			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+			mpx = mbuf_pool_find(socket_id, mp_n);
+			if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+				/**
+				 * When the segment length is zero, set it to
+				 *      buffer size - headroom size
+				 * to make sure enough space is accommodated for the header.
+				 */
+				rx_seg->length = rx_pkt_seg_lengths[i] ?
+						 rx_pkt_seg_lengths[i] :
+						 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
+				rx_seg->offset = i < rx_pkt_nb_offs ?
+						 rx_pkt_seg_offsets[i] : 0;
+				rx_seg->mp = mpx ? mpx : mp;
+			}
+		}
+		rx_conf->rx_nseg = rx_pkt_nb_segs;
+		rx_conf->rx_seg = rx_useg;
+	} else {
+		for (i = 0; i < mbuf_data_size_n; i++) {
+			mpx = mbuf_pool_find(socket_id, i);
+			rx_mempool[i] = mpx ? mpx : mp;
 		}
+		rx_conf->rx_mempools = rx_mempool;
+		rx_conf->rx_nmempool = mbuf_data_size_n;
 	}
-	rx_conf->rx_nseg = rx_pkt_nb_segs;
-	rx_conf->rx_seg = rx_useg;
 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
 				    socket_id, rx_conf, NULL);
 	rx_conf->rx_seg = NULL;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index e65be323b8..14be10dcef 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -80,6 +80,9 @@  extern uint8_t cl_quit;
 
 #define MIN_TOTAL_NUM_MBUFS 1024
 
+/* Maximum number of pools supported per Rx queue */
+#define MAX_MEMPOOL 8
+
 typedef uint8_t  lcoreid_t;
 typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
index fd98e8b51d..f9df5f69ef 100644
--- a/app/test-pmd/util.c
+++ b/app/test-pmd/util.c
@@ -150,8 +150,8 @@  dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
 				 print_buf, buf_size, &cur_len);
 		MKDUMPSTR(print_buf, buf_size, cur_len,
-			  " - type=0x%04x - length=%u - nb_segs=%d",
-			  eth_type, (unsigned int) mb->pkt_len,
+			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
+			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
 			  (int)mb->nb_segs);
 		ol_flags = mb->ol_flags;
 		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {