@@ -19,7 +19,11 @@
#define IAVF_FRAME_SIZE_MAX 9728
#define IAVF_QUEUE_BASE_ADDR_UNIT 128
-#define IAVF_MAX_NUM_QUEUES 16
+#define IAVF_MAX_NUM_QUEUES_DFLT 16
+#define IAVF_MAX_NUM_QUEUES_LV 256
+#define IAVF_RXTX_QUEUE_CHUNKS_NUM 2
+#define IAVF_CFG_Q_NUM_PER_BUF 32
+#define IAVF_IRQ_MAP_NUM_PER_BUF 128
#define IAVF_NUM_MACADDR_MAX 64
@@ -104,8 +108,10 @@ struct iavf_fdir_info {
struct iavf_fdir_conf conf;
};
-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS 16
+struct iavf_qv_map {
+ uint16_t queue_id;
+ uint16_t vector_id;
+};
/* Message type read in admin queue from PF */
enum iavf_aq_result {
@@ -149,14 +155,16 @@ struct iavf_info {
uint8_t *rss_key;
uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
uint16_t msix_base; /* msix vector base from */
- /* queue bitmask for each vector */
- uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+ uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
+ struct iavf_qv_map *qv_map; /* queue vector mapping */
struct iavf_flow_list flow_list;
rte_spinlock_t flow_ops_lock;
struct iavf_parser_list rss_parser_list;
struct iavf_parser_list dist_parser_list;
struct iavf_fdir_info fdir; /* flow director info */
+ /* indicate large VF support enabled or not */
+ bool lv_enabled;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -283,13 +291,20 @@ int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
bool rx, bool on);
+int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+ bool rx, bool on);
int iavf_enable_queues(struct iavf_adapter *adapter);
+int iavf_enable_queues_lv(struct iavf_adapter *adapter);
int iavf_disable_queues(struct iavf_adapter *adapter);
+int iavf_disable_queues_lv(struct iavf_adapter *adapter);
int iavf_configure_rss_lut(struct iavf_adapter *adapter);
int iavf_configure_rss_key(struct iavf_adapter *adapter);
-int iavf_configure_queues(struct iavf_adapter *adapter);
+int iavf_configure_queues(struct iavf_adapter *adapter,
+ uint16_t num_queue_pairs, uint16_t index);
int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+ uint16_t index);
void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
int iavf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
@@ -310,4 +325,5 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num, bool add);
int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
#endif /* _IAVF_ETHDEV_H_ */
@@ -238,7 +238,7 @@ iavf_init_rss(struct iavf_adapter *adapter)
rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
- IAVF_MAX_NUM_QUEUES);
+ vf->max_rss_qregion);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
PMD_DRV_LOG(DEBUG, "RSS is not supported");
@@ -284,6 +284,31 @@ iavf_init_rss(struct iavf_adapter *adapter)
return 0;
}
+/* Request @num queue pairs from the PF and reset the VF so the new
+ * queue resources take effect. Logs and returns a negative value if
+ * either the request or the reset fails; returns 0 on success.
+ */
+static int
+iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
+{
+	struct iavf_adapter *ad =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	int ret = 0;
+
+	ret = iavf_request_queues(ad, num);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "request queues from PF failed");
+		return ret;
+	}
+	PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+			vf->vsi_res->num_queue_pairs, num);
+
+	/* A VF reset is required for the PF-granted queue count to apply. */
+	ret = iavf_dev_reset(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "vf reset failed");
+		return ret;
+	}
+
+	return 0;
+}
+
static int
iavf_dev_configure(struct rte_eth_dev *dev)
{
@@ -292,6 +317,8 @@ iavf_dev_configure(struct rte_eth_dev *dev)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+ uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
int ret;
ad->rx_bulk_alloc_allowed = true;
@@ -314,6 +341,46 @@ iavf_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ /* Large VF setting */
+ if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
+ if (!(vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
+ PMD_DRV_LOG(ERR, "large VF is not supported");
+ return -1;
+ }
+
+ if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
+ PMD_DRV_LOG(ERR, "queue pairs number cannot be larger "
+ "than %u", IAVF_MAX_NUM_QUEUES_LV);
+ return -1;
+ }
+
+ ret = iavf_queues_req_reset(dev, num_queue_pairs);
+ if (ret)
+ return ret;
+
+ ret = iavf_get_max_rss_queue_region(ad);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "get max rss queue region failed");
+ return ret;
+ }
+
+ vf->lv_enabled = true;
+ } else {
+ /* Check if large VF is already enabled. If so, disable and
+ * release redundant queue resource.
+ */
+ if (vf->lv_enabled) {
+ ret = iavf_queues_req_reset(dev, num_queue_pairs);
+ if (ret)
+ return ret;
+
+ vf->lv_enabled = false;
+ }
+ /* if large VF is not required, use default rss queue region */
+ vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
+ }
+
/* Vlan stripping setting */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -328,6 +395,7 @@ iavf_dev_configure(struct rte_eth_dev *dev)
return -1;
}
}
+
return 0;
}
@@ -410,6 +478,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+ struct iavf_qv_map *qv_map;
uint16_t interval, i;
int vec;
@@ -430,6 +499,14 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
}
}
+ qv_map = rte_zmalloc("qv_map",
+ dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+ if (!qv_map) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+
if (!dev->data->dev_conf.intr_conf.rxq ||
!rte_intr_dp_is_en(intr_handle)) {
/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -460,16 +537,21 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
}
IAVF_WRITE_FLUSH(hw);
/* map all queues to the same interrupt */
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
+ }
+ vf->qv_map = qv_map;
} else {
if (!rte_intr_allow_others(intr_handle)) {
vf->nb_msix = 1;
vf->msix_base = IAVF_MISC_VEC_ID;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vf->msix_base] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vf->msix_base;
intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"vector %u are mapping to all Rx queues",
vf->msix_base);
@@ -482,21 +564,46 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
vf->msix_base = IAVF_RX_VEC_START;
vec = IAVF_RX_VEC_START;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[vec] |= 1 << i;
+ qv_map[i].queue_id = i;
+ qv_map[i].vector_id = vec;
intr_handle->intr_vec[i] = vec++;
if (vec >= vf->nb_msix)
vec = IAVF_RX_VEC_START;
}
+ vf->qv_map = qv_map;
PMD_DRV_LOG(DEBUG,
"%u vectors are mapping to %u Rx queues",
vf->nb_msix, dev->data->nb_rx_queues);
}
}
- if (iavf_config_irq_map(adapter)) {
- PMD_DRV_LOG(ERR, "config interrupt mapping failed");
- return -1;
+ if (!vf->lv_enabled) {
+ if (iavf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ } else {
+ uint16_t num_qv_maps = dev->data->nb_rx_queues;
+ uint16_t index = 0;
+
+ while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
+ if (iavf_config_irq_map_lv(adapter,
+ IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping "
+ "for large VF failed");
+ return -1;
+ }
+ num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
+ index += IAVF_IRQ_MAP_NUM_PER_BUF;
+ }
+
+ if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping "
+ "for large VF failed");
+ return -1;
+ }
}
+
return 0;
}
@@ -537,6 +644,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct rte_intr_handle *intr_handle = dev->intr_handle;
+ uint16_t num_queue_pairs;
+ uint16_t index = 0;
PMD_INIT_FUNC_TRACE();
@@ -545,13 +654,27 @@ iavf_dev_start(struct rte_eth_dev *dev)
vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
+ num_queue_pairs = vf->num_queue_pairs;
if (iavf_init_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "failed to do Queue init");
return -1;
}
- if (iavf_configure_queues(adapter) != 0) {
+ /* If needed, send configure queues msg multiple times to make the
+ * adminq buffer length smaller than the 4K limitation.
+ */
+ while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
+ if (iavf_configure_queues(adapter,
+ IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+ num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
+ index += IAVF_CFG_Q_NUM_PER_BUF;
+ }
+
+ if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
PMD_DRV_LOG(ERR, "configure queues failed");
goto err_queue;
}
@@ -560,6 +683,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "configure irq failed");
goto err_queue;
}
+
/* re-enable intr again, because efd assign may change */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
rte_intr_disable(intr_handle);
@@ -624,8 +748,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
- dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
+ dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
@@ -1703,6 +1827,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
goto err_alloc;
}
+
/* Allocate memort for RSS info */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vf->rss_key = rte_zmalloc("rss_key",
@@ -720,6 +720,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_rx_queue *rxq;
int err = 0;
@@ -743,7 +744,11 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+ if (!vf->lv_enabled)
+ err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+ else
+ err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
+
if (err)
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
@@ -760,6 +765,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_tx_queue *txq;
int err = 0;
@@ -775,7 +781,10 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IAVF_WRITE_FLUSH(hw);
/* Ready to switch the queue on */
- err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+ if (!vf->lv_enabled)
+ err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+ else
+ err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -876,14 +885,22 @@ iavf_stop_queues(struct rte_eth_dev *dev)
{
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_rx_queue *rxq;
struct iavf_tx_queue *txq;
int ret, i;
/* Stop All queues */
- ret = iavf_disable_queues(adapter);
- if (ret)
- PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ if (!vf->lv_enabled) {
+ ret = iavf_disable_queues(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ } else {
+ ret = iavf_disable_queues_lv(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
+ }
+
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
@@ -457,7 +457,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_LARGE_NUM_QPAIRS;
args.in_args = (uint8_t *)∩︀
args.in_args_size = sizeof(caps);
@@ -607,6 +608,138 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
return err;
}
+/* Enable all configured Rx and Tx queues using the large-VF virtchnl
+ * message VIRTCHNL_OP_ENABLE_QUEUES_V2 (one chunk per queue type).
+ * Returns 0 on success or a negative error code.
+ */
+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* One trailing chunk is already part of the struct, hence "- 1". */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+	      sizeof(struct virtchnl_queue_chunk) *
+	      (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
+
+	/* The admin queue copies the buffer; free it on every path
+	 * (the original leaked queue_select on both success and failure).
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
+/* Disable all configured Rx and Tx queues using the large-VF virtchnl
+ * message VIRTCHNL_OP_DISABLE_QUEUES_V2 (one chunk per queue type).
+ * Returns 0 on success or a negative error code.
+ */
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	/* One trailing chunk is already part of the struct, hence "- 1". */
+	len = sizeof(struct virtchnl_del_ena_dis_queues) +
+	      sizeof(struct virtchnl_queue_chunk) *
+	      (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+		adapter->eth_dev->data->nb_tx_queues;
+
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+		adapter->eth_dev->data->nb_rx_queues;
+
+	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
+
+	/* The admin queue copies the buffer; free it on every path
+	 * (the original leaked queue_select on both success and failure).
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
+/* Enable or disable a single queue using the large-VF virtchnl V2
+ * messages. @qid is the queue id, @rx selects Rx vs Tx, @on selects
+ * enable vs disable. Returns 0 on success or a negative error code.
+ */
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+		 bool rx, bool on)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_del_ena_dis_queues *queue_select;
+	struct virtchnl_queue_chunk *queue_chunk;
+	struct iavf_cmd_info args;
+	int err, len;
+
+	len = sizeof(struct virtchnl_del_ena_dis_queues);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (!queue_select)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = 1;
+	queue_select->vport_id = vf->vsi_res->vsi_id;
+
+	/* A single one-queue chunk describes the target queue. */
+	queue_chunk->type = rx ? VIRTCHNL_QUEUE_TYPE_RX :
+				 VIRTCHNL_QUEUE_TYPE_TX;
+	queue_chunk->start_queue_id = qid;
+	queue_chunk->num_queues = 1;
+
+	if (on)
+		args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+	else
+		args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+
+	/* The admin queue copies the buffer; free it on every path
+	 * (the original leaked queue_select on both success and failure).
+	 */
+	rte_free(queue_select);
+	return err;
+}
+
int
iavf_configure_rss_lut(struct iavf_adapter *adapter)
{
@@ -672,7 +805,8 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
}
int
-iavf_configure_queues(struct iavf_adapter *adapter)
+iavf_configure_queues(struct iavf_adapter *adapter,
+ uint16_t num_queue_pairs, uint16_t index)
{
struct iavf_rx_queue **rxq =
(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
@@ -686,16 +820,16 @@ iavf_configure_queues(struct iavf_adapter *adapter)
int err;
size = sizeof(*vc_config) +
- sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+ sizeof(vc_config->qpair[0]) * num_queue_pairs;
vc_config = rte_zmalloc("cfg_queue", size, 0);
if (!vc_config)
return -ENOMEM;
vc_config->vsi_id = vf->vsi_res->vsi_id;
- vc_config->num_queue_pairs = vf->num_queue_pairs;
+ vc_config->num_queue_pairs = num_queue_pairs;
- for (i = 0, vc_qp = vc_config->qpair;
- i < vf->num_queue_pairs;
+ for (i = index, vc_qp = vc_config->qpair;
+ i < index + num_queue_pairs;
i++, vc_qp++) {
vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
vc_qp->txq.queue_id = i;
@@ -775,13 +909,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
return -ENOMEM;
map_info->num_vectors = vf->nb_msix;
- for (i = 0; i < vf->nb_msix; i++) {
- vecmap = &map_info->vecmap[i];
+ for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+ vecmap =
+ &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
vecmap->vsi_id = vf->vsi_res->vsi_id;
vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
- vecmap->vector_id = vf->msix_base + i;
+ vecmap->vector_id = vf->qv_map[i].vector_id;
vecmap->txq_map = 0;
- vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
}
args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -797,6 +932,47 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
return err;
}
+/* Map @num queue/vector pairs, starting at @index into vf->qv_map, to
+ * the PF via VIRTCHNL_OP_MAP_QUEUE_VECTOR. Called repeatedly by the
+ * irq-config path so each admin-queue buffer stays within the size
+ * limit. Returns 0 on success or a negative error code.
+ */
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+		uint16_t index)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct iavf_cmd_info args;
+	int len, i, err;
+	int count = 0;
+
+	/* One trailing map entry is already part of the struct ("- 1"). */
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vf->vsi_res->vsi_id;
+	map_info->num_qv_maps = num;
+	/* i indexes vf->qv_map (global), count indexes this message. */
+	for (i = index; i < index + map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[count++];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = vf->qv_map[i].queue_id;
+		qv_maps->vector_id = vf->qv_map[i].vector_id;
+	}
+
+	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
void
iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
{
@@ -1272,3 +1448,33 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
return -1;
}
+
+/* Query the PF for the maximum RSS queue region width via
+ * VIRTCHNL_OP_GET_MAX_RSS_QREGION and store 2^width in
+ * vf->max_rss_qregion. Returns 0 on success or a negative error code.
+ */
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	uint16_t qregion_width;
+	int err;
+
+	args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of "
+			    "VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+		return err;
+	}
+
+	/* PF reports a power-of-two exponent; expand to a queue count. */
+	qregion_width =
+	((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
+
+	vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
+
+	return 0;
+}