[v2,2/2] examples/vmdq: fix RSS configuration
diff mbox series

Message ID 20200325063248.28484-3-junyux.jiang@intel.com
State New
Delegated to: Ferruh Yigit
Headers show
Series
  • examples/vmdq: fix RSS configuration
Related show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Junyu Jiang March 25, 2020, 6:32 a.m. UTC
In order that all queues of pools can receive packets,
add enable-rss argument to change rss configuration.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
Acked-by: Xiaoyun Li <xiaoyun.li@intel.com>
---
 doc/guides/sample_app_ug/vmdq_forwarding.rst |  6 +--
 examples/vmdq/main.c                         | 39 +++++++++++++++++---
 2 files changed, 37 insertions(+), 8 deletions(-)

Comments

Han, YingyaX March 30, 2020, 2:35 a.m. UTC | #1
Tested-by: Han,YingyaX <yingyax.han@intel.com>

BRs,
Yingya

-----Original Message-----
From: dev <dev-bounces@dpdk.org> On Behalf Of Junyu Jiang
Sent: Wednesday, March 25, 2020 2:33 PM
To: dev@dpdk.org
Cc: Yang, Qiming <qiming.yang@intel.com>; Yigit, Ferruh <ferruh.yigit@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>; stable@dpdk.org
Subject: [dpdk-dev] [PATCH v2 2/2] examples/vmdq: fix RSS configuration

In order that all queues of pools can receive packets, add enable-rss argument to change rss configuration.

Fixes: 6bb97df521aa ("examples/vmdq: new app")
Cc: stable@dpdk.org

Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
Acked-by: Xiaoyun Li <xiaoyun.li@intel.com>
---
 doc/guides/sample_app_ug/vmdq_forwarding.rst |  6 +--
 examples/vmdq/main.c                         | 39 +++++++++++++++++---
 2 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst b/doc/guides/sample_app_ug/vmdq_forwarding.rst
index df23043d6..658d6742d 100644
--- a/doc/guides/sample_app_ug/vmdq_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
@@ -26,13 +26,13 @@ The Intel® 82599 10 Gigabit Ethernet Controller NIC also supports the splitting
 While the Intel® X710 or XL710 Ethernet Controller NICs support many configurations of VMDQ pools of 4 or 8 queues each.
 And queues numbers for each VMDQ pool can be changed by setting CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
 in config/common_* file.
-The nb-pools parameter can be passed on the command line, after the EAL parameters:
+The nb-pools and enable-rss parameters can be passed on the command line, after the EAL parameters:
 
 .. code-block:: console
 
-    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
+    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP --enable-rss
 
-where, NP can be 8, 16 or 32.
+where, NP can be 8, 16 or 32, rss is disabled by default.
 
 In Linux* user space, the application can display statistics with the number of packets received on each queue.
 To have the application display the statistics, send a SIGHUP signal to the running application process.
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 011110920..98032e6a3 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
 /* number of pools (if user does not specify any, 8 by default */
 static uint32_t num_queues = 8;
 static uint32_t num_pools = 8;
+static uint8_t rss_enable;
 
 /* empty vmdq configuration structure. Filled in programatically */
 static const struct rte_eth_conf vmdq_conf_default = {
@@ -143,6 +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
+	if (rss_enable) {
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
+							ETH_RSS_UDP |
+							ETH_RSS_TCP |
+							ETH_RSS_SCTP;
+	}
 	return 0;
 }
 
@@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 	uint16_t q;
 	uint16_t queues_per_pool;
 	uint32_t max_nb_pools;
+	uint64_t rss_hf_tmp;
 
 	/*
 	 * The max pool number from dev_info will be used to validate the pool
@@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 	if (!rte_eth_dev_is_valid_port(port))
 		return -1;
 
+	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+	port_conf.rx_adv_conf.rss_conf.rss_hf &=
+		dev_info.flow_type_rss_offloads;
+	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+		printf("Port %u modified RSS hash function based on hardware support,"
+			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
+			port,
+			rss_hf_tmp,
+			port_conf.rx_adv_conf.rss_conf.rss_hf);
+	}
+
 	/*
 	 * Though in this example, we only receive packets from the first queue
 	 * of each pool and send packets through first rte_lcore_count() tx
@@ -363,7 +383,8 @@ static void
 vmdq_usage(const char *prgname)
 {
 	printf("%s [EAL options] -- -p PORTMASK]\n"
-	"  --nb-pools NP: number of pools\n",
+	"  --nb-pools NP: number of pools\n"
+	"  --enable-rss: enable RSS (disabled by default)\n",
 	       prgname);
 }
 
@@ -377,6 +398,7 @@ vmdq_parse_args(int argc, char **argv)
 	const char *prgname = argv[0];
 	static struct option long_option[] = {
 		{"nb-pools", required_argument, NULL, 0},
+		{"enable-rss", 0, NULL, 0},
 		{NULL, 0, 0, 0}
 	};
 
@@ -394,11 +416,18 @@ vmdq_parse_args(int argc, char **argv)
 			}
 			break;
 		case 0:
-			if (vmdq_parse_num_pools(optarg) == -1) {
-				printf("invalid number of pools\n");
-				vmdq_usage(prgname);
-				return -1;
+			if (!strcmp(long_option[option_index].name,
+			    "nb-pools")) {
+				if (vmdq_parse_num_pools(optarg) == -1) {
+					printf("invalid number of pools\n");
+					vmdq_usage(prgname);
+					return -1;
+				}
 			}
+
+			if (!strcmp(long_option[option_index].name,
+			    "enable-rss"))
+				rss_enable = 1;
 			break;
 
 		default:
--
2.17.1
Wu, Jingjing April 3, 2020, 12:08 a.m. UTC | #2
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Junyu Jiang
> Sent: Wednesday, March 25, 2020 2:33 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Yigit, Ferruh <ferruh.yigit@intel.com>; Jiang,
> JunyuX <junyux.jiang@intel.com>; stable@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 2/2] examples/vmdq: fix RSS configuration
> 
> In order that all queues of pools can receive packets,
> add enable-rss argument to change rss configuration.
> 
> Fixes: 6bb97df521aa ("examples/vmdq: new app")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
> Acked-by: Xiaoyun Li <xiaoyun.li@intel.com>
> ---
>  doc/guides/sample_app_ug/vmdq_forwarding.rst |  6 +--
>  examples/vmdq/main.c                         | 39 +++++++++++++++++---
>  2 files changed, 37 insertions(+), 8 deletions(-)
> 
> diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst
> b/doc/guides/sample_app_ug/vmdq_forwarding.rst
> index df23043d6..658d6742d 100644
> --- a/doc/guides/sample_app_ug/vmdq_forwarding.rst
> +++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
> @@ -26,13 +26,13 @@ The Intel® 82599 10 Gigabit Ethernet Controller NIC also supports
> the splitting
>  While the Intel® X710 or XL710 Ethernet Controller NICs support many configurations of
> VMDQ pools of 4 or 8 queues each.
>  And queues numbers for each VMDQ pool can be changed by setting
> CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
>  in config/common_* file.
> -The nb-pools parameter can be passed on the command line, after the EAL parameters:
> +The nb-pools and enable-rss parameters can be passed on the command line, after the
> EAL parameters:
> 
>  .. code-block:: console
> 
> -    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
> +    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP --enable-rss
> 
> -where, NP can be 8, 16 or 32.
> +where, NP can be 8, 16 or 32, rss is disabled by default.
> 
>  In Linux* user space, the application can display statistics with the number of packets
> received on each queue.
>  To have the application display the statistics, send a SIGHUP signal to the running
> application process.
> diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
> index 011110920..98032e6a3 100644
> --- a/examples/vmdq/main.c
> +++ b/examples/vmdq/main.c
> @@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
>  /* number of pools (if user does not specify any, 8 by default */
>  static uint32_t num_queues = 8;
>  static uint32_t num_pools = 8;
> +static uint8_t rss_enable;
> 
>  /* empty vmdq configuration structure. Filled in programatically */
>  static const struct rte_eth_conf vmdq_conf_default = {
> @@ -143,6 +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t
> num_pools)
>  	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
>  	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
>  		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
> +	if (rss_enable) {
> +		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
> +		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
> +							ETH_RSS_UDP |
> +							ETH_RSS_TCP |
> +							ETH_RSS_SCTP;
> +	}
>  	return 0;
>  }
> 
> @@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
>  	uint16_t q;
>  	uint16_t queues_per_pool;
>  	uint32_t max_nb_pools;
> +	uint64_t rss_hf_tmp;
> 
>  	/*
>  	 * The max pool number from dev_info will be used to validate the pool
> @@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
>  	if (!rte_eth_dev_is_valid_port(port))
>  		return -1;
> 
> +	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
> +	port_conf.rx_adv_conf.rss_conf.rss_hf &=
> +		dev_info.flow_type_rss_offloads;
> +	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
> +		printf("Port %u modified RSS hash function based on hardware support,"

This is RSS offload type but not hash function.
Li, Xiaoyun April 3, 2020, 1:35 a.m. UTC | #3
> -----Original Message-----
> From: stable <stable-bounces@dpdk.org> On Behalf Of Wu, Jingjing
> Sent: Friday, April 3, 2020 08:08
> To: Jiang, JunyuX <junyux.jiang@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Yigit, Ferruh
> <ferruh.yigit@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>;
> stable@dpdk.org
> Subject: Re: [dpdk-stable] [dpdk-dev] [PATCH v2 2/2] examples/vmdq: fix RSS
> configuration
> 
> 
> 
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Junyu Jiang
> > Sent: Wednesday, March 25, 2020 2:33 PM
> > To: dev@dpdk.org
> > Cc: Yang, Qiming <qiming.yang@intel.com>; Yigit, Ferruh
> > <ferruh.yigit@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>;
> > stable@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2 2/2] examples/vmdq: fix RSS
> > configuration
> >
> > In order that all queues of pools can receive packets, add enable-rss
> > argument to change rss configuration.
> >
> > Fixes: 6bb97df521aa ("examples/vmdq: new app")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
> > Acked-by: Xiaoyun Li <xiaoyun.li@intel.com>
> > ---
> >  doc/guides/sample_app_ug/vmdq_forwarding.rst |  6 +--
> >  examples/vmdq/main.c                         | 39 +++++++++++++++++---
> >  2 files changed, 37 insertions(+), 8 deletions(-)
> >
> > diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst
> > b/doc/guides/sample_app_ug/vmdq_forwarding.rst
> > index df23043d6..658d6742d 100644
> > --- a/doc/guides/sample_app_ug/vmdq_forwarding.rst
> > +++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
> > @@ -26,13 +26,13 @@ The Intel® 82599 10 Gigabit Ethernet Controller
> > NIC also supports the splitting  While the Intel® X710 or XL710
> > Ethernet Controller NICs support many configurations of VMDQ pools of
> > 4 or 8 queues each.
> >  And queues numbers for each VMDQ pool can be changed by setting
> > CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
> >  in config/common_* file.
> > -The nb-pools parameter can be passed on the command line, after the EAL
> parameters:
> > +The nb-pools and enable-rss parameters can be passed on the command
> > +line, after the
> > EAL parameters:
> >
> >  .. code-block:: console
> >
> > -    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
> > +    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
> > + --enable-rss
> >
> > -where, NP can be 8, 16 or 32.
> > +where, NP can be 8, 16 or 32, rss is disabled by default.
> >
> >  In Linux* user space, the application can display statistics with the
> > number of packets received on each queue.
> >  To have the application display the statistics, send a SIGHUP signal
> > to the running application process.
> > diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c index
> > 011110920..98032e6a3 100644
> > --- a/examples/vmdq/main.c
> > +++ b/examples/vmdq/main.c
> > @@ -59,6 +59,7 @@ static uint32_t enabled_port_mask;
> >  /* number of pools (if user does not specify any, 8 by default */
> > static uint32_t num_queues = 8;  static uint32_t num_pools = 8;
> > +static uint8_t rss_enable;
> >
> >  /* empty vmdq configuration structure. Filled in programatically */
> > static const struct rte_eth_conf vmdq_conf_default = { @@ -143,6
> > +144,13 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t
> > num_pools)
> >  	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
> >  	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
> >  		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
> > +	if (rss_enable) {
> > +		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
> > +		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
> > +							ETH_RSS_UDP |
> > +							ETH_RSS_TCP |
> > +							ETH_RSS_SCTP;
> > +	}
> >  	return 0;
> >  }
> >
> > @@ -164,6 +172,7 @@ port_init(uint16_t port, struct rte_mempool
> *mbuf_pool)
> >  	uint16_t q;
> >  	uint16_t queues_per_pool;
> >  	uint32_t max_nb_pools;
> > +	uint64_t rss_hf_tmp;
> >
> >  	/*
> >  	 * The max pool number from dev_info will be used to validate the
> > pool @@ -209,6 +218,17 @@ port_init(uint16_t port, struct rte_mempool
> *mbuf_pool)
> >  	if (!rte_eth_dev_is_valid_port(port))
> >  		return -1;
> >
> > +	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
> > +	port_conf.rx_adv_conf.rss_conf.rss_hf &=
> > +		dev_info.flow_type_rss_offloads;
> > +	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
> > +		printf("Port %u modified RSS hash function based on hardware
> support,"
> 
> This is RSS offload type but not hash function.

* The *rss_hf* field of the *rss_conf* structure indicates the different
 * types of IPv4/IPv6 packets to which the RSS hashing must be applied.
 * Supplying an *rss_hf* equal to zero disables the RSS feature.

And in testpmd, it's the same.
port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_hf & port->dev_info.flow_type_rss_offloads;
Wu, Jingjing April 3, 2020, 3:15 a.m. UTC | #4
> > > +	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
> > > +	port_conf.rx_adv_conf.rss_conf.rss_hf &=
> > > +		dev_info.flow_type_rss_offloads;
> > > +	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
> > > +		printf("Port %u modified RSS hash function based on hardware
> > support,"
> >
> > This is RSS offload type but not hash function.
> 
> * The *rss_hf* field of the *rss_conf* structure indicates the different
>  * types of IPv4/IPv6 packets to which the RSS hashing must be applied.
>  * Supplying an *rss_hf* equal to zero disables the RSS feature.
> 
> And in testpmd, it's the same.
> port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
> 				rss_hf & port->dev_info.flow_type_rss_offloads;

OK. I got, the definition of rss_hf at the beginning might be hash function which also the same as RSS offload type.
Ignore my comments then.

BTW hash function is also indicating TOEPLITZ/XOR... in somewhere. 


Thanks
Jingjing

Patch
diff mbox series

diff --git a/doc/guides/sample_app_ug/vmdq_forwarding.rst b/doc/guides/sample_app_ug/vmdq_forwarding.rst
index df23043d6..658d6742d 100644
--- a/doc/guides/sample_app_ug/vmdq_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_forwarding.rst
@@ -26,13 +26,13 @@  The Intel® 82599 10 Gigabit Ethernet Controller NIC also supports the splitting
 While the Intel® X710 or XL710 Ethernet Controller NICs support many configurations of VMDQ pools of 4 or 8 queues each.
 And queues numbers for each VMDQ pool can be changed by setting CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
 in config/common_* file.
-The nb-pools parameter can be passed on the command line, after the EAL parameters:
+The nb-pools and enable-rss parameters can be passed on the command line, after the EAL parameters:
 
 .. code-block:: console
 
-    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP
+    ./build/vmdq_app [EAL options] -- -p PORTMASK --nb-pools NP --enable-rss
 
-where, NP can be 8, 16 or 32.
+where, NP can be 8, 16 or 32, rss is disabled by default.
 
 In Linux* user space, the application can display statistics with the number of packets received on each queue.
 To have the application display the statistics, send a SIGHUP signal to the running application process.
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 011110920..98032e6a3 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -59,6 +59,7 @@  static uint32_t enabled_port_mask;
 /* number of pools (if user does not specify any, 8 by default */
 static uint32_t num_queues = 8;
 static uint32_t num_pools = 8;
+static uint8_t rss_enable;
 
 /* empty vmdq configuration structure. Filled in programatically */
 static const struct rte_eth_conf vmdq_conf_default = {
@@ -143,6 +144,13 @@  get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
 	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
 		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
+	if (rss_enable) {
+		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+		eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
+							ETH_RSS_UDP |
+							ETH_RSS_TCP |
+							ETH_RSS_SCTP;
+	}
 	return 0;
 }
 
@@ -164,6 +172,7 @@  port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 	uint16_t q;
 	uint16_t queues_per_pool;
 	uint32_t max_nb_pools;
+	uint64_t rss_hf_tmp;
 
 	/*
 	 * The max pool number from dev_info will be used to validate the pool
@@ -209,6 +218,17 @@  port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 	if (!rte_eth_dev_is_valid_port(port))
 		return -1;
 
+	rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+	port_conf.rx_adv_conf.rss_conf.rss_hf &=
+		dev_info.flow_type_rss_offloads;
+	if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+		printf("Port %u modified RSS hash function based on hardware support,"
+			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
+			port,
+			rss_hf_tmp,
+			port_conf.rx_adv_conf.rss_conf.rss_hf);
+	}
+
 	/*
 	 * Though in this example, we only receive packets from the first queue
 	 * of each pool and send packets through first rte_lcore_count() tx
@@ -363,7 +383,8 @@  static void
 vmdq_usage(const char *prgname)
 {
 	printf("%s [EAL options] -- -p PORTMASK]\n"
-	"  --nb-pools NP: number of pools\n",
+	"  --nb-pools NP: number of pools\n"
+	"  --enable-rss: enable RSS (disabled by default)\n",
 	       prgname);
 }
 
@@ -377,6 +398,7 @@  vmdq_parse_args(int argc, char **argv)
 	const char *prgname = argv[0];
 	static struct option long_option[] = {
 		{"nb-pools", required_argument, NULL, 0},
+		{"enable-rss", 0, NULL, 0},
 		{NULL, 0, 0, 0}
 	};
 
@@ -394,11 +416,18 @@  vmdq_parse_args(int argc, char **argv)
 			}
 			break;
 		case 0:
-			if (vmdq_parse_num_pools(optarg) == -1) {
-				printf("invalid number of pools\n");
-				vmdq_usage(prgname);
-				return -1;
+			if (!strcmp(long_option[option_index].name,
+			    "nb-pools")) {
+				if (vmdq_parse_num_pools(optarg) == -1) {
+					printf("invalid number of pools\n");
+					vmdq_usage(prgname);
+					return -1;
+				}
 			}
+
+			if (!strcmp(long_option[option_index].name,
+			    "enable-rss"))
+				rss_enable = 1;
 			break;
 
 		default: