From patchwork Fri Sep 1 11:31:54 2023
X-Patchwork-Submitter: "Zhang, Yuying"
X-Patchwork-Id: 131081
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: Wenjing Qiao, Yuying Zhang
Subject: [PATCH v2 4/8] net/cpfl: setup ctrl path
Date: Fri, 1 Sep 2023 11:31:54 +0000
Message-Id: <20230901113158.1654044-5-yuying.zhang@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230901113158.1654044-1-yuying.zhang@intel.com>
References: <20230812075506.361769-1-yuying.zhang@intel.com>
 <20230901113158.1654044-1-yuying.zhang@intel.com>

From: Wenjing Qiao

Set up the control vport and the control queues used for flow offloading.
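
Note: the control-path bring-up added by this patch runs in the following
order (an outline of the new functions, for review convenience):

	/* cpfl_ctrl_path_open(), called from cpfl_adapter_ext_init() */
	cpfl_vc_create_ctrl_vport();	/* VIRTCHNL2_OP_CREATE_VPORT: 4 Tx + 4 Rx config queues */
	cpfl_init_ctrl_vport();		/* parse queue id/tail register chunks from the reply */
	cpfl_cfgq_setup();		/* allocate DMA memory for the rings and Rx buffers */
	cpfl_add_cfgqs();		/* register the eight rings with the control queue layer */
	cpfl_start_cfgqs();		/* VIRTCHNL2_OP_CONFIG_{TX,RX}_QUEUES, then enable */

cpfl_ctrl_path_close(), called from cpfl_adapter_ext_deinit(), undoes this:
stop the config queues, remove the rings, then destroy the control vport.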
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao
---
 drivers/net/cpfl/cpfl_ethdev.c | 267 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  14 ++
 drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
 3 files changed, 425 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3c4a6a4724..22f3e72894 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1852,6 +1856,260 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
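+/* The config queues are created in Tx/Rx pairs: slot 2 * i of cfgq_info[]
+ * and ctlqp[] is Tx config queue i, slot 2 * i + 1 is Rx config queue i.
+ * Each ring holds CPFL_CFGQ_RING_SIZE (512) descriptors; only the Rx rings
+ * get buffer memory (CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE bytes each).
+ */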
queue id: %d", + adapter->cfgq_info[i].id); + adapter->ctlqp[i] = cfg_cq; + } + + return ret; +} + +#define CPFL_CFGQ_RING_LEN 512 +#define CPFL_CFGQ_DESCRIPTOR_SIZE 32 +#define CPFL_CFGQ_BUFFER_SIZE 256 +#define CPFL_CFGQ_RING_SIZE 512 + +static int +cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_ctlq_create_info *create_cfgq_info; + struct cpfl_vport *vport; + int i, err; + uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc); + uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE; + + vport = &adapter->ctrl_vport; + create_cfgq_info = adapter->cfgq_info; + + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + if (i % 2 == 0) { + /* Setup Tx config queue */ + create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2; + create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX; + create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE; + create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE; + memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg)); + create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start + + i / 2 * vport->base.chunks_info.tx_qtail_spacing; + } else { + /* Setup Rx config queue */ + create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2; + create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX; + create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE; + create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE; + memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg)); + create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start + + i / 2 * vport->base.chunks_info.rx_qtail_spacing; + if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem, + buf_size)) { + err = -ENOMEM; + goto free_mem; + } + } + if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem, + ring_size)) { + err = -ENOMEM; + goto free_mem; + } + } + return 0; +free_mem: + for (i = 0; i < CPFL_CFGQ_NUM; i++) { + if (create_cfgq_info[i].ring_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem); + if (create_cfgq_info[i].buf_mem.va) + idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem); + } + return err; +} + +static int +cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_vport *vport = &adapter->ctrl_vport; + struct virtchnl2_create_vport *vport_info = + (struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info; + int i; + + vport->itf.adapter = adapter; + vport->base.adapter = &adapter->base; + vport->base.vport_id = vport_info->vport_id; + + for (i = 0; i < vport_info->chunks.num_chunks; i++) { + if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) { + vport->base.chunks_info.tx_start_qid = + vport_info->chunks.chunks[i].start_queue_id; + vport->base.chunks_info.tx_qtail_start = + vport_info->chunks.chunks[i].qtail_reg_start; + vport->base.chunks_info.tx_qtail_spacing = + vport_info->chunks.chunks[i].qtail_reg_spacing; + } else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) { + vport->base.chunks_info.rx_start_qid = + vport_info->chunks.chunks[i].start_queue_id; + vport->base.chunks_info.rx_qtail_start = + vport_info->chunks.chunks[i].qtail_reg_start; + vport->base.chunks_info.rx_qtail_spacing = + vport_info->chunks.chunks[i].qtail_reg_spacing; + } else { + PMD_INIT_LOG(ERR, "Unsupported chunk type"); + return -EINVAL; + } + } + + return 0; +} + +static void +cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter) +{ + cpfl_remove_cfgqs(adapter); + cpfl_stop_cfgqs(adapter); + 
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 		VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_whitelist_uninit(adapter);
@@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index ed730cc0e9..90e71a6550 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -89,6 +90,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
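+
+/* CPFL_CFGQ_NUM covers the four Tx/Rx config queue pairs; the queues are
+ * stored interleaved (Tx in even slots, Rx in odd slots) in the ctlqp[]
+ * and cfgq_info[] arrays of struct cpfl_adapter_ext below.
+ */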
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -189,10 +194,19 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vport_identity,
 			   struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
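+/* The 2 * i + 1 indexing below selects the Rx config rings from the
+ * interleaved ctlqp[]/cfgq_info[] arrays; cpfl_config_ctlq_tx() uses the
+ * even (2 * i) slots.
+ */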
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
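
A note on the message sizing used by both helpers above: the virtchnl2
config messages declare a one-element qinfo[] array inside the struct, so
the allocation only needs room for num_qs - 1 additional entries. A minimal
stand-alone illustration (with a stand-in struct, not the real virtchnl2
definition):

	#include <stdlib.h>

	struct qinfo { unsigned int queue_id; };

	/* Stand-in for a virtchnl2 config message: one qinfo element lives
	 * inside the struct, the remaining entries are allocated past it. */
	struct cfg_msg {
		unsigned int vport_id;
		unsigned short num_qinfo;
		struct qinfo qinfo[1];
	};

	static struct cfg_msg *
	cfg_msg_alloc(unsigned short num_qs)
	{
		size_t size = sizeof(struct cfg_msg) +
			      (num_qs - 1) * sizeof(struct qinfo);
		struct cfg_msg *msg = calloc(1, size);

		if (msg != NULL)
			msg->num_qinfo = num_qs;
		return msg;
	}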