[dpdk-dev,v4,01/12] ixgbe: support VMDq RSS in non-SRIOV environment
Commit Message
In a non-SRIOV environment, VMDq RSS can be enabled via the MRQC register.
In theory, the number of queues per pool could be 2 or 4, but only 2 queues are
available due to a HW limitation; the same limit also exists in the Linux ixgbe driver.
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
Changes in v2:
- fix checkpatch errors
Changes in v4:
- use vmdq_queue_num to calculate queue number per pool
drivers/net/ixgbe/ixgbe_rxtx.c | 86 +++++++++++++++++++++++++++++++++++-------
lib/librte_ether/rte_ethdev.c | 31 +++++++++++++++
2 files changed, 104 insertions(+), 13 deletions(-)
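For context, a minimal sketch of how an application could request this mode through the ethdev API of that era; the helper name configure_vmdq_rss and the pool/queue counts are illustrative, not part of the patch:

#include <string.h>
#include <rte_ethdev.h>

/* Illustration only: request VMDq RSS on one port in a non-SRIOV setup.
 * 64 pools x 2 RSS queues matches the 2-queue HW limit noted in the
 * commit message. */
static int
configure_vmdq_rss(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
	conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_64_POOLS;
	conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;

	/* 128 Rx queues in total: 64 pools x 2 queues each on 82599 */
	return rte_eth_dev_configure(port_id, 128, 1, &conf);
}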
Comments
On 12/08/2015 10:02, Ouyang Changchun wrote:
> +#define VMDQ_RSS_RX_QUEUE_NUM_MAX 4
> +
> +static int
> +rte_eth_dev_check_vmdq_rss_rxq_num(__rte_unused uint8_t port_id, uint16_t nb_rx_q)
> +{
> + if (nb_rx_q > VMDQ_RSS_RX_QUEUE_NUM_MAX)
> + return -EINVAL;
> + return 0;
> +}
> +
It is an ixgbe limitation, so it should not be included in
librte_ether/rte_ethdev.c.
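A rough sketch of that suggestion, with the check kept inside the ixgbe PMD instead of the generic layer; the name ixgbe_check_vmdq_rss_rxq_num below is hypothetical, not code from the patch:

/* Hypothetical PMD-local form of the same check; illustration only. */
#define IXGBE_VMDQ_RSS_RX_QUEUE_NUM_MAX 4

static int
ixgbe_check_vmdq_rss_rxq_num(uint16_t nb_rx_q)
{
	if (nb_rx_q > IXGBE_VMDQ_RSS_RX_QUEUE_NUM_MAX)
		return -EINVAL;
	return 0;
}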
@@ -3513,16 +3513,16 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
return;
}
-/*
- * VMDq only support for 10 GbE NIC.
+/**
+ * Config pool for VMDq on 10 GbE NIC.
*/
static void
-ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+ixgbe_vmdq_pool_configure(struct rte_eth_dev *dev)
{
struct rte_eth_vmdq_rx_conf *cfg;
struct ixgbe_hw *hw;
enum rte_eth_nb_pools num_pools;
- uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vt_ctl, vlanctrl;
uint32_t vmolr = 0;
int i;
@@ -3531,12 +3531,6 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
num_pools = cfg->nb_queue_pools;
- ixgbe_rss_disable(dev);
-
- /* MRQC: enable vmdq */
- mrqc = IXGBE_MRQC_VMDQEN;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
/* PFVTCTL: turn on virtualisation and set the default pool */
vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
if (cfg->enable_default_pool)
@@ -3602,7 +3596,29 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
IXGBE_WRITE_FLUSH(hw);
}
-/*
+/**
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ixgbe_rss_disable(dev);
+
+ /* MRQC: enable vmdq */
+ mrqc = IXGBE_MRQC_VMDQEN;
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_vmdq_pool_configure(dev);
+}
+
+/**
* ixgbe_dcb_config_tx_hw_config - Configure general VMDq TX parameters
* @hw: pointer to hardware structure
*/
@@ -3707,6 +3723,41 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)
}
static int
+ixgbe_config_vmdq_rss(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ ixgbe_rss_configure(dev);
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* MRQC: enable VMDQ RSS */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+
+ switch (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) {
+ case 2:
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ break;
+
+ case 4:
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in non-IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ ixgbe_vmdq_pool_configure(dev);
+
+ return 0;
+}
+
+static int
ixgbe_config_vf_default(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
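(For reference: on the 82599 the two MRQE encodings imply fixed splits, with IXGBE_MRQC_VMDQRSS64EN selecting 64 pools of 2 RSS queues and IXGBE_MRQC_VMDQRSS32EN selecting 32 pools of 4 RSS queues, which is why nb_q_per_pool alone is enough to pick the encoding.)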
@@ -3762,6 +3813,10 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_vmdq_rx_hw_configure(dev);
break;
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_config_vmdq_rss(dev);
+ break;
+
case ETH_MQ_RX_NONE:
/* if mq_mode is none, disable rss mode.*/
default: ixgbe_rss_disable(dev);
@@ -4252,6 +4307,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ uint32_t psrtype = 0;
+
rxq = dev->data->rx_queues[i];
/*
@@ -4279,12 +4336,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (rx_conf->header_split) {
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
- uint32_t psrtype;
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
}
srrctl = ((rx_conf->split_hdr_size <<
IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
@@ -4294,6 +4349,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ /* Set RQPL for VMDQ RSS according to max Rx queue */
+ psrtype |= (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool >> 1) <<
+ IXGBE_PSRTYPE_RQPL_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
srrctl |= IXGBE_SRRCTL_DROP_EN;
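(The RQPL arithmetic works out as follows: nb_q_per_pool >> 1 yields 1 for 2 queues per pool and 2 for 4 queues per pool, which, shifted by IXGBE_PSRTYPE_RQPL_SHIFT, matches the 82599 PSRTYPE RQPL encoding of 01b = 2 and 10b = 4 RSS queues per pool.)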
@@ -906,6 +906,16 @@ rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
return 0;
}
+#define VMDQ_RSS_RX_QUEUE_NUM_MAX 4
+
+static int
+rte_eth_dev_check_vmdq_rss_rxq_num(__rte_unused uint8_t port_id, uint16_t nb_rx_q)
+{
+ if (nb_rx_q > VMDQ_RSS_RX_QUEUE_NUM_MAX)
+ return -EINVAL;
+ return 0;
+}
+
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
@@ -1067,6 +1077,27 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return -EINVAL;
}
}
+
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_RSS) {
+ uint32_t nb_queue_pools =
+ dev_conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ dev_info.vmdq_queue_num/nb_queue_pools;
+
+ if (rte_eth_dev_check_vmdq_rss_rxq_num(port_id,
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) != 0) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d"
+ " SRIOV active, invalid queue"
+ " number for VMDQ RSS, allowed"
+ " value are 1, 2 or 4\n",
+ port_id);
+ return -EINVAL;
+ }
+ }
}
return 0;
}
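Worked example, assuming an 82599 that reports vmdq_queue_num = 128: with 64 pools, nb_q_per_pool = 128/64 = 2 and VMDQRSS64EN is used; with 32 pools, 128/32 = 4 and VMDQRSS32EN is used; any pool count that yields more than 4 queues per pool fails the new check with -EINVAL.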