From patchwork Thu Oct 22 06:48:57 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81723
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:48:57 +0800
Message-Id: <20201022064902.40143-2-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 1/6] net/iavf: handle virtchnl event message without interrupt

Currently, a VF handles virtchnl event messages only from within the
interrupt handler. That is not sufficient in two cases:
1. If an event message arrives during VF initialization, before the
   interrupt is enabled, the message is not handled correctly.
2. Some virtchnl commands need to receive and handle the event message
   while the interrupt is disabled.
To solve this issue, add virtchnl event message handling to the path
that reads virtchnl messages from the PF over the admin queue.
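Editor's note, not part of the patch: a condensed sketch of the polling
pattern this change enables, using names introduced in the diff below
(iavf_read_msg_from_pf(), enum iavf_aq_result, MAX_TRY_TIMES,
ASQ_DELAY_MS). The helper name poll_for_cmd_reply() is hypothetical and
only illustrates how a caller can wait for a command reply while system
events are consumed inside iavf_read_msg_from_pf() itself:

    /* Sketch only: poll the admin queue until the async reply to our
     * own command arrives; link-change/reset events (IAVF_MSG_SYS) are
     * already handled inside iavf_read_msg_from_pf(). */
    static int
    poll_for_cmd_reply(struct iavf_adapter *adapter,
                       uint8_t *buf, uint16_t buf_len)
    {
            enum iavf_aq_result res;
            int i = 0;

            do {
                    res = iavf_read_msg_from_pf(adapter, buf_len, buf);
                    if (res == IAVF_MSG_CMD)  /* reply to pending command */
                            return 0;
                    if (res == IAVF_MSG_ERR)  /* adminq access error */
                            return -1;
                    /* IAVF_MSG_NON or IAVF_MSG_SYS: retry after a delay */
                    rte_delay_ms(ASQ_DELAY_MS);
            } while (i++ < MAX_TRY_TIMES);

            return -1; /* timed out waiting for the reply */
    }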
Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h       |   8 ++
 drivers/net/iavf/iavf_vchnl.c | 138 ++++++++++++++++++++++------------
 2 files changed, 97 insertions(+), 49 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 3198d85b3..9c16324c1 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -107,6 +107,14 @@ struct iavf_fdir_info {
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS 16

+/* Message type read in admin queue from PF */
+enum iavf_aq_result {
+        IAVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
+        IAVF_MSG_NON,      /* Read nothing from admin queue */
+        IAVF_MSG_SYS,      /* Read system msg from admin queue */
+        IAVF_MSG_CMD,      /* Read async command result */
+};
+
 /* Structure to store private data specific for VF instance. */
 struct iavf_info {
         uint16_t num_queue_pairs;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index a2295f879..b62c8683c 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -25,14 +25,54 @@
 #define MAX_TRY_TIMES 200
 #define ASQ_DELAY_MS  10

+static uint32_t
+iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
+{
+        uint32_t speed;
+
+        switch (virt_link_speed) {
+        case VIRTCHNL_LINK_SPEED_100MB:
+                speed = 100;
+                break;
+        case VIRTCHNL_LINK_SPEED_1GB:
+                speed = 1000;
+                break;
+        case VIRTCHNL_LINK_SPEED_10GB:
+                speed = 10000;
+                break;
+        case VIRTCHNL_LINK_SPEED_40GB:
+                speed = 40000;
+                break;
+        case VIRTCHNL_LINK_SPEED_20GB:
+                speed = 20000;
+                break;
+        case VIRTCHNL_LINK_SPEED_25GB:
+                speed = 25000;
+                break;
+        case VIRTCHNL_LINK_SPEED_2_5GB:
+                speed = 2500;
+                break;
+        case VIRTCHNL_LINK_SPEED_5GB:
+                speed = 5000;
+                break;
+        default:
+                speed = 0;
+                break;
+        }
+
+        return speed;
+}
+
 /* Read data in admin queue to get msg from pf driver */
-static enum iavf_status
+static enum iavf_aq_result
 iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
                       uint8_t *buf)
 {
         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct rte_eth_dev *dev = adapter->eth_dev;
         struct iavf_arq_event_info event;
+        enum iavf_aq_result result = IAVF_MSG_NON;
         enum virtchnl_ops opcode;
         int ret;

@@ -42,7 +82,9 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
         /* Can't read any msg from adminQ */
         if (ret) {
                 PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
-                return ret;
+                if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+                        result = IAVF_MSG_ERR;
+                return result;
         }

         opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
@@ -52,16 +94,51 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
         PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
                     opcode, vf->cmd_retval);

-        if (opcode != vf->pend_cmd) {
-                if (opcode != VIRTCHNL_OP_EVENT) {
-                        PMD_DRV_LOG(WARNING,
-                                    "command mismatch, expect %u, get %u",
-                                    vf->pend_cmd, opcode);
+        if (opcode == VIRTCHNL_OP_EVENT) {
+                struct virtchnl_pf_event *vpe =
+                        (struct virtchnl_pf_event *)event.msg_buf;
+
+                result = IAVF_MSG_SYS;
+                switch (vpe->event) {
+                case VIRTCHNL_EVENT_LINK_CHANGE:
+                        vf->link_up =
+                                vpe->event_data.link_event.link_status;
+                        if (vf->vf_res->vf_cap_flags &
+                            VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+                                vf->link_speed =
+                                        vpe->event_data.link_event_adv.link_speed;
+                        } else {
+                                enum virtchnl_link_speed speed;
+                                speed = vpe->event_data.link_event.link_speed;
+                                vf->link_speed = iavf_convert_link_speed(speed);
+                        }
+                        iavf_dev_link_update(dev, 0);
+                        PMD_DRV_LOG(INFO, "Link status update:%s",
+                                    vf->link_up ? "up" : "down");
+                        break;
+                case VIRTCHNL_EVENT_RESET_IMPENDING:
+                        vf->vf_reset = true;
+                        PMD_DRV_LOG(INFO, "VF is resetting");
+                        break;
+                case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+                        vf->dev_closed = true;
+                        PMD_DRV_LOG(INFO, "PF driver closed");
+                        break;
+                default:
+                        PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+                                    __func__, vpe->event);
+                }
+        } else {
+                /* async reply msg on command issued by vf previously */
+                result = IAVF_MSG_CMD;
+                if (opcode != vf->pend_cmd) {
+                        PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+                                    vf->pend_cmd, opcode);
+                        result = IAVF_MSG_ERR;
                 }
-                return IAVF_ERR_OPCODE_MISMATCH;
         }

-        return IAVF_SUCCESS;
+        return result;
 }

 static int
@@ -69,6 +146,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 {
         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        enum iavf_aq_result result;
         enum iavf_status ret;
         int err = 0;
         int i = 0;
@@ -97,9 +175,9 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
         case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
                 /* for init virtchnl ops, need to poll the response */
                 do {
-                        ret = iavf_read_msg_from_pf(adapter, args->out_size,
+                        result = iavf_read_msg_from_pf(adapter, args->out_size,
                                                 args->out_buffer);
-                        if (ret == IAVF_SUCCESS)
+                        if (result == IAVF_MSG_CMD)
                                 break;
                         rte_delay_ms(ASQ_DELAY_MS);
                 } while (i++ < MAX_TRY_TIMES);
@@ -136,44 +214,6 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
         return err;
 }

-static uint32_t
-iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
-{
-        uint32_t speed;
-
-        switch (virt_link_speed) {
-        case VIRTCHNL_LINK_SPEED_100MB:
-                speed = 100;
-                break;
-        case VIRTCHNL_LINK_SPEED_1GB:
-                speed = 1000;
-                break;
-        case VIRTCHNL_LINK_SPEED_10GB:
-                speed = 10000;
-                break;
-        case VIRTCHNL_LINK_SPEED_40GB:
-                speed = 40000;
-                break;
-        case VIRTCHNL_LINK_SPEED_20GB:
-                speed = 20000;
-                break;
-        case VIRTCHNL_LINK_SPEED_25GB:
-                speed = 25000;
-                break;
-        case VIRTCHNL_LINK_SPEED_2_5GB:
-                speed = 2500;
-                break;
-        case VIRTCHNL_LINK_SPEED_5GB:
-                speed = 5000;
-                break;
-        default:
-                speed = 0;
-                break;
-        }
-
-        return speed;
-}
-
 static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
                          uint16_t msglen)

From patchwork Thu Oct 22 06:48:58 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81724
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:48:58 +0800
Message-Id: <20201022064902.40143-3-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 2/6] net/iavf: add IAVF request queues function

Add a new virtchnl function to request additional queues from the PF.
The default number of queue pairs when a VF is created is 16. To
support up to 256 queue pairs per VF, enable this request-queues
function. When the request succeeds, the PF returns an event message.
If the interrupt handler consumes that message first, the
request-queues command cannot see the PF response and waits until
timeout. Therefore, disable the interrupt before requesting queues so
that the event message is handled by polling the admin queue instead.

Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h       |  1 +
 drivers/net/iavf/iavf_vchnl.c | 88 ++++++++++++++++++++++++++++++++++-
 2 files changed, 87 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 9c16324c1..778b6c23c 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -287,4 +287,5 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
                         struct rte_ether_addr *mc_addrs,
                         uint32_t mc_addrs_num, bool add);
+int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b62c8683c..323e2a843 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include

 #include "iavf.h"
@@ -189,7 +190,33 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
                 }
                 _clear_cmd(vf);
                 break;
-
+        case VIRTCHNL_OP_REQUEST_QUEUES:
+                /*
+                 * ignore async reply, only wait for system message,
+                 * vf_reset = true if get VIRTCHNL_EVENT_RESET_IMPENDING,
+                 * if not, means request queues failed.
+                 */
+                do {
+                        result = iavf_read_msg_from_pf(adapter, args->out_size,
+                                                args->out_buffer);
+                        if (result == IAVF_MSG_SYS && vf->vf_reset) {
+                                break;
+                        } else if (result == IAVF_MSG_CMD ||
+                                   result == IAVF_MSG_ERR) {
+                                err = -1;
+                                break;
+                        }
+                        rte_delay_ms(ASQ_DELAY_MS);
+                        /* If don't read msg or read sys event, continue */
+                } while (i++ < MAX_TRY_TIMES);
+                if (i >= MAX_TRY_TIMES ||
+                    vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+                        err = -1;
+                        PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+                                    " for cmd %d", vf->cmd_retval, args->ops);
+                }
+                _clear_cmd(vf);
+                break;
         default:
                 /* For other virtchnl ops in running time,
                  * wait for the cmd done flag.
@@ -429,7 +456,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
         caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
                 VIRTCHNL_VF_OFFLOAD_FDIR_PF |
-                VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+                VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+                VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

         args.in_args = (uint8_t *)&caps;
         args.in_args_size = sizeof(caps);
@@ -1183,3 +1211,59 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,

         return 0;
 }
+
+int
+iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+{
+        struct rte_eth_dev *dev = adapter->eth_dev;
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+        struct virtchnl_vf_res_request vfres;
+        struct iavf_cmd_info args;
+        uint16_t num_queue_pairs;
+        int err;
+
+        if (!(vf->vf_res->vf_cap_flags &
+              VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+                PMD_DRV_LOG(ERR, "request queues not supported");
+                return -1;
+        }
+
+        if (num == 0) {
+                PMD_DRV_LOG(ERR, "queue number cannot be zero");
+                return -1;
+        }
+        vfres.num_queue_pairs = num;
+
+        args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+        args.in_args = (u8 *)&vfres;
+        args.in_args_size = sizeof(vfres);
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+
+        /*
+         * disable interrupt to avoid the admin queue message to be read
+         * before iavf_read_msg_from_pf.
+         */
+        rte_intr_disable(&pci_dev->intr_handle);
+        err = iavf_execute_vf_cmd(adapter, &args);
+        rte_intr_enable(&pci_dev->intr_handle);
+        if (err) {
+                PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+                return err;
+        }
+
+        /* request queues succeeded, vf is resetting */
+        if (vf->vf_reset) {
+                PMD_DRV_LOG(INFO, "vf is resetting");
+                return 0;
+        }
+
+        /* request additional queues failed, return available number */
+        num_queue_pairs =
+                ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs;
+        PMD_DRV_LOG(ERR, "request queues failed, only %u queues "
+                    "available", num_queue_pairs);
+
+        return -1;
+}

From patchwork Thu Oct 22 06:48:59 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81725
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:48:59 +0800
Message-Id: <20201022064902.40143-4-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 3/6] net/iavf: negotiate large VF and request more queues

Negotiate the large VF capability with the PF during VF initialization.
If large VF is supported and more than 16 queues are required, the VF
requests additional queues from the PF and marks large VF support as
enabled. Once the number of allocated queues exceeds 16, the max RSS
queue region can no longer be assumed to be 16. Add a function to query
the max RSS queue region from the PF, and use it in RSS initialization
and future filter configuration.

Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h        |  7 +++-
 drivers/net/iavf/iavf_ethdev.c | 74 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_vchnl.c  | 31 +++++++++++++-
 3 files changed, 107 insertions(+), 5 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 778b6c23c..49ccfeece 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -19,7 +19,8 @@
 #define IAVF_FRAME_SIZE_MAX       9728
 #define IAVF_QUEUE_BASE_ADDR_UNIT 128

-#define IAVF_MAX_NUM_QUEUES       16
+#define IAVF_MAX_NUM_QUEUES_DFLT 16
+#define IAVF_MAX_NUM_QUEUES_LV   256

 #define IAVF_NUM_MACADDR_MAX      64

@@ -149,6 +150,7 @@ struct iavf_info {
         uint8_t *rss_key;
         uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
         uint16_t msix_base; /* msix vector base from */
+        uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
         /* queue bitmask for each vector */
         uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
         struct iavf_flow_list flow_list;
@@ -157,6 +159,8 @@ struct iavf_info {
         struct iavf_parser_list dist_parser_list;

         struct iavf_fdir_info fdir; /* flow director info */
+        /* indicate large VF support enabled or not */
+        bool lv_enabled;
 };

 #define IAVF_MAX_PKT_TYPE 1024
@@ -288,4 +292,5 @@ int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
                         struct rte_ether_addr *mc_addrs,
                         uint32_t mc_addrs_num, bool add);
 int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index a5b06e6bd..87082d1cc 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -209,7 +209,7 @@ iavf_init_rss(struct iavf_adapter *adapter)
         rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
         nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
-                       IAVF_MAX_NUM_QUEUES);
+                       vf->max_rss_qregion);

         if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
                 PMD_DRV_LOG(DEBUG, "RSS is not supported");
@@ -255,6 +255,31 @@ iavf_init_rss(struct iavf_adapter *adapter)
         return 0;
 }

+static int
+iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
+{
+        struct iavf_adapter *ad =
+                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+        int ret;
+
+        ret = iavf_request_queues(ad, num);
+        if (ret) {
+                PMD_DRV_LOG(ERR, "request queues from PF failed");
+                return ret;
+        }
+        PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
+                    vf->vsi_res->num_queue_pairs, num);
+
+        ret = iavf_dev_reset(dev);
+        if (ret) {
+                PMD_DRV_LOG(ERR, "vf reset failed");
+                return ret;
+        }
+
+        return 0;
+}
+
 static int
 iavf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -262,6 +287,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+        uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+                dev->data->nb_tx_queues);
+        int ret;

         ad->rx_bulk_alloc_allowed = true;
         /* Initialize to TRUE. If any of Rx queues doesn't meet the
@@ -273,6 +301,46 @@ iavf_dev_configure(struct rte_eth_dev *dev)
         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

+        /* Large VF setting */
+        if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_DFLT) {
+                if (!(vf->vf_res->vf_cap_flags &
+                      VIRTCHNL_VF_LARGE_NUM_QPAIRS)) {
+                        PMD_DRV_LOG(ERR, "large VF is not supported");
+                        return -1;
+                }
+
+                if (num_queue_pairs > IAVF_MAX_NUM_QUEUES_LV) {
+                        PMD_DRV_LOG(ERR, "queue pairs number cannot be larger than %u",
+                                    IAVF_MAX_NUM_QUEUES_LV);
+                        return -1;
+                }
+
+                ret = iavf_queues_req_reset(dev, num_queue_pairs);
+                if (ret)
+                        return ret;
+
+                ret = iavf_get_max_rss_queue_region(ad);
+                if (ret) {
+                        PMD_INIT_LOG(ERR, "get max rss queue region failed");
+                        return ret;
+                }
+
+                vf->lv_enabled = true;
+        } else {
+                /* Check if large VF is already enabled. If so, disable and
+                 * release redundant queue resource.
+                 */
+                if (vf->lv_enabled) {
+                        ret = iavf_queues_req_reset(dev, num_queue_pairs);
+                        if (ret)
+                                return ret;
+
+                        vf->lv_enabled = false;
+                }
+                /* if large VF is not required, use default rss queue region */
+                vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
+        }
+
         /* Vlan stripping setting */
         if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
                 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -586,8 +654,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

-        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
-        dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+        dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
+        dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
         dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
         dev_info->max_rx_pktlen = IAVF_FRAME_SIZE_MAX;
         dev_info->max_mtu = dev_info->max_rx_pktlen - IAVF_ETH_OVERHEAD;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 323e2a843..2c49a0e76 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -457,7 +457,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
                 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
                 VIRTCHNL_VF_OFFLOAD_FDIR_PF |
                 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
-                VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+                VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+                VIRTCHNL_VF_LARGE_NUM_QPAIRS;

         args.in_args = (uint8_t *)&caps;
         args.in_args_size = sizeof(caps);
@@ -1267,3 +1268,31 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)

         return -1;
 }
+
+int
+iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
+{
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct iavf_cmd_info args;
+        uint16_t qregion_width;
+        int err;
+
+        args.ops = VIRTCHNL_OP_GET_MAX_RSS_QREGION;
+        args.in_args = NULL;
+        args.in_args_size = 0;
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+
+        err = iavf_execute_vf_cmd(adapter, &args);
+        if (err) {
+                PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
+                return err;
+        }
+
+        qregion_width =
+                ((struct virtchnl_max_rss_qregion *)args.out_buffer)->qregion_width;
+
+        vf->max_rss_qregion = (uint16_t)(1 << qregion_width);
+
+        return 0;
+}

From patchwork Thu Oct 22 06:49:00 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81726
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:49:00 +0800
Message-Id: <20201022064902.40143-5-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 4/6] net/iavf: enable multiple queues configurations for large VF

The adminq buffer has a 4K size limitation, so a single
VIRTCHNL_OP_CONFIG_VSI_QUEUES message cannot configure up to 256
queues. Send the message multiple times, keeping each buffer below the
4K limit.
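As a rough illustration (using only constants and the new
iavf_configure_queues() signature from the diff below): with 256 queue
pairs and IAVF_CFG_Q_NUM_PER_BUF = 32, iavf_dev_start() ends up issuing
eight VIRTCHNL_OP_CONFIG_VSI_QUEUES messages of 32 queue-pair configs
each:

    /* Sketch of the chunking loop added to iavf_dev_start(): send the
     * queue configuration in 32-pair batches so each adminq buffer
     * stays under the 4K limit. */
    uint16_t index = 0;
    uint16_t num_queue_pairs = vf->num_queue_pairs; /* e.g. 256 */

    while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
            if (iavf_configure_queues(adapter, IAVF_CFG_Q_NUM_PER_BUF,
                                      index) != 0)
                    return -1; /* one 32-pair batch failed */
            num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
            index += IAVF_CFG_Q_NUM_PER_BUF;
    }
    /* final batch of at most 32 pairs */
    if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0)
            return -1;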
Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h        |  4 +++-
 drivers/net/iavf/iavf_ethdev.c | 18 +++++++++++++++++-
 drivers/net/iavf/iavf_vchnl.c  | 11 ++++++-----
 3 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 49ccfeece..1cdac1b10 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -21,6 +21,7 @@

 #define IAVF_MAX_NUM_QUEUES_DFLT 16
 #define IAVF_MAX_NUM_QUEUES_LV   256
+#define IAVF_CFG_Q_NUM_PER_BUF   32

 #define IAVF_NUM_MACADDR_MAX     64

@@ -269,7 +270,8 @@ int iavf_enable_queues(struct iavf_adapter *adapter);
 int iavf_disable_queues(struct iavf_adapter *adapter);
 int iavf_configure_rss_lut(struct iavf_adapter *adapter);
 int iavf_configure_rss_key(struct iavf_adapter *adapter);
-int iavf_configure_queues(struct iavf_adapter *adapter);
+int iavf_configure_queues(struct iavf_adapter *adapter,
+                        uint16_t num_queue_pairs, uint16_t index);
 int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
 int iavf_config_irq_map(struct iavf_adapter *adapter);
 void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 87082d1cc..db9449590 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -564,6 +564,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct rte_intr_handle *intr_handle = dev->intr_handle;
+        uint16_t num_queue_pairs;
+        uint16_t index = 0;

         PMD_INIT_FUNC_TRACE();

@@ -572,13 +574,27 @@ iavf_dev_start(struct rte_eth_dev *dev)
         vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
         vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                       dev->data->nb_tx_queues);
+        num_queue_pairs = vf->num_queue_pairs;

         if (iavf_init_queues(dev) != 0) {
                 PMD_DRV_LOG(ERR, "failed to do Queue init");
                 return -1;
         }

-        if (iavf_configure_queues(adapter) != 0) {
+        /* If needed, send configure queues msg multiple times to make the
+         * adminq buffer length smaller than the 4K limitation.
+         */
+        while (num_queue_pairs > IAVF_CFG_Q_NUM_PER_BUF) {
+                if (iavf_configure_queues(adapter,
+                                IAVF_CFG_Q_NUM_PER_BUF, index) != 0) {
+                        PMD_DRV_LOG(ERR, "configure queues failed");
+                        goto err_queue;
+                }
+                num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF;
+                index += IAVF_CFG_Q_NUM_PER_BUF;
+        }
+
+        if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) {
                 PMD_DRV_LOG(ERR, "configure queues failed");
                 goto err_queue;
         }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 2c49a0e76..7e6abeca9 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -673,7 +673,8 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 }

 int
-iavf_configure_queues(struct iavf_adapter *adapter)
+iavf_configure_queues(struct iavf_adapter *adapter,
+                uint16_t num_queue_pairs, uint16_t index)
 {
         struct iavf_rx_queue **rxq =
                 (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
@@ -687,16 +688,16 @@ iavf_configure_queues(struct iavf_adapter *adapter)
         int err;

         size = sizeof(*vc_config) +
-               sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+               sizeof(vc_config->qpair[0]) * num_queue_pairs;
         vc_config = rte_zmalloc("cfg_queue", size, 0);
         if (!vc_config)
                 return -ENOMEM;

         vc_config->vsi_id = vf->vsi_res->vsi_id;
-        vc_config->num_queue_pairs = vf->num_queue_pairs;
+        vc_config->num_queue_pairs = num_queue_pairs;

-        for (i = 0, vc_qp = vc_config->qpair;
-             i < vf->num_queue_pairs;
+        for (i = index, vc_qp = vc_config->qpair;
+             i < index + num_queue_pairs;
              i++, vc_qp++) {
                 vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
                 vc_qp->txq.queue_id = i;

From patchwork Thu Oct 22 06:49:01 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81727
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:49:01 +0800
Message-Id: <20201022064902.40143-6-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 5/6] net/iavf: enable IRQ mapping configuration for large VF

The current IRQ mapping configuration supports at most 16 queues and
16 MSI-X vectors. Change the queue-vector mapping structure to describe
up to 256 queues. A new opcode handles the case with a large number of
queues. To stay within the adminq buffer size limit, the virtchnl
message is sent multiple times if needed.

Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h        | 12 +++++---
 drivers/net/iavf/iavf_ethdev.c | 50 +++++++++++++++++++++++++++++-----
 drivers/net/iavf/iavf_vchnl.c  | 50 +++++++++++++++++++++++++++++++---
 3 files changed, 97 insertions(+), 15 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 1cdac1b10..5e330b215 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -22,6 +22,7 @@
 #define IAVF_MAX_NUM_QUEUES_DFLT 16
 #define IAVF_MAX_NUM_QUEUES_LV   256
 #define IAVF_CFG_Q_NUM_PER_BUF   32
+#define IAVF_IRQ_MAP_NUM_PER_BUF 128

 #define IAVF_NUM_MACADDR_MAX     64

@@ -106,8 +107,10 @@ struct iavf_fdir_info {
         struct iavf_fdir_conf conf;
 };

-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS 16
+struct iavf_qv_map {
+        uint16_t queue_id;
+        uint16_t vector_id;
+};

 /* Message type read in admin queue from PF */
 enum iavf_aq_result {
@@ -152,8 +155,7 @@ struct iavf_info {
         uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
         uint16_t msix_base; /* msix vector base from */
         uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
-        /* queue bitmask for each vector */
-        uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+        struct iavf_qv_map *qv_map; /* queue vector mapping */
         struct iavf_flow_list flow_list;
         rte_spinlock_t flow_ops_lock;
         struct iavf_parser_list rss_parser_list;
@@ -274,6 +276,8 @@ int iavf_configure_queues(struct iavf_adapter *adapter,
                         uint16_t num_queue_pairs, uint16_t index);
 int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
 int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+                        uint16_t index);
 void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
 int iavf_dev_link_update(struct rte_eth_dev *dev,
                         __rte_unused int wait_to_complete);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index db9449590..92dd14171 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -437,6 +437,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+        struct iavf_qv_map *qv_map;
         uint16_t interval, i;
         int vec;

@@ -457,6 +458,14 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                 }
         }

+        qv_map = rte_zmalloc("qv_map",
+                dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+        if (!qv_map) {
+                PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+                            dev->data->nb_rx_queues);
+                return -1;
+        }
+
         if (!dev->data->dev_conf.intr_conf.rxq ||
             !rte_intr_dp_is_en(intr_handle)) {
                 /* Rx interrupt disabled, Map interrupt only for writeback */
@@ -487,16 +496,21 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                 }
                 IAVF_WRITE_FLUSH(hw);
                 /* map all queues to the same interrupt */
-                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                        vf->rxq_map[vf->msix_base] |= 1 << i;
+                for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                        qv_map[i].queue_id = i;
+                        qv_map[i].vector_id = vf->msix_base;
+                }
+                vf->qv_map = qv_map;
         } else {
                 if (!rte_intr_allow_others(intr_handle)) {
                         vf->nb_msix = 1;
                         vf->msix_base = IAVF_MISC_VEC_ID;
                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                                vf->rxq_map[vf->msix_base] |= 1 << i;
+                                qv_map[i].queue_id = i;
+                                qv_map[i].vector_id = vf->msix_base;
                                 intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
                         }
+                        vf->qv_map = qv_map;
                         PMD_DRV_LOG(DEBUG,
                                     "vector %u are mapping to all Rx queues",
                                     vf->msix_base);
@@ -509,20 +523,42 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                         vf->msix_base = IAVF_RX_VEC_START;
                         vec = IAVF_RX_VEC_START;
                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                                vf->rxq_map[vec] |= 1 << i;
+                                qv_map[i].queue_id = i;
+                                qv_map[i].vector_id = vec;
                                 intr_handle->intr_vec[i] = vec++;
                                 if (vec >= vf->nb_msix)
                                         vec = IAVF_RX_VEC_START;
                         }
+                        vf->qv_map = qv_map;
                         PMD_DRV_LOG(DEBUG,
                                     "%u vectors are mapping to %u Rx queues",
                                     vf->nb_msix, dev->data->nb_rx_queues);
                 }
         }

-        if (iavf_config_irq_map(adapter)) {
-                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-                return -1;
+        if (!vf->lv_enabled) {
+                if (iavf_config_irq_map(adapter)) {
+                        PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+                        return -1;
+                }
+        } else {
+                uint16_t num_qv_maps = dev->data->nb_rx_queues;
+                uint16_t index = 0;
+
+                while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
+                        if (iavf_config_irq_map_lv(adapter,
+                                        IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
+                                PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+                                return -1;
+                        }
+                        num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
+                        index += IAVF_IRQ_MAP_NUM_PER_BUF;
+                }
+
+                if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
+                        PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+                        return -1;
+                }
         }
         return 0;
 }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 7e6abeca9..145a4ff9e 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -775,13 +775,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
                 return -ENOMEM;

         map_info->num_vectors = vf->nb_msix;
-        for (i = 0; i < vf->nb_msix; i++) {
-                vecmap = &map_info->vecmap[i];
+        for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+                vecmap =
+                        &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
                 vecmap->vsi_id = vf->vsi_res->vsi_id;
                 vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
-                vecmap->vector_id = vf->msix_base + i;
+                vecmap->vector_id = vf->qv_map[i].vector_id;
                 vecmap->txq_map = 0;
-                vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+                vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
         }

         args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -797,6 +798,47 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
         return err;
 }

+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+                uint16_t index)
+{
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct virtchnl_queue_vector_maps *map_info;
+        struct virtchnl_queue_vector *qv_maps;
+        struct iavf_cmd_info args;
+        int len, i, err;
+        int count = 0;
+
+        len = sizeof(struct virtchnl_queue_vector_maps) +
+              sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+        map_info = rte_zmalloc("map_info", len, 0);
+        if (!map_info)
+                return -ENOMEM;
+
+        map_info->vport_id = vf->vsi_res->vsi_id;
+        map_info->num_qv_maps = num;
+        for (i = index; i < index + map_info->num_qv_maps; i++) {
+                qv_maps = &map_info->qv_maps[count++];
+                qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+                qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+                qv_maps->queue_id = vf->qv_map[i].queue_id;
+                qv_maps->vector_id = vf->qv_map[i].vector_id;
+        }
+
+        args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+        args.in_args = (u8 *)map_info;
+        args.in_args_size = len;
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+        err = iavf_execute_vf_cmd(adapter, &args);
+        if (err)
+                PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+        rte_free(map_info);
+        return err;
+}
+
 void
 iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 {

From patchwork Thu Oct 22 06:49:02 2020
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 81728
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu
Date: Thu, 22 Oct 2020 14:49:02 +0800
Message-Id: <20201022064902.40143-7-ting.xu@intel.com>
In-Reply-To: <20201022064902.40143-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
 <20201022064902.40143-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v8 6/6] net/iavf: add enable/disable queues for large VF

The current virtchnl structure for enabling/disabling queues supports
at most 32 queue pairs. Use a new opcode and structure that can
describe up to 256 queue pairs, so that queues can be enabled and
disabled in the large VF case.
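For orientation (editorial, not part of the patch): the legacy
virtchnl_queue_select describes queues as 32-bit rx/tx bitmaps, which
caps it at 32 queues, while the V2 structure describes them as
(start_queue_id, num_queues) chunks. A minimal sketch of the runtime
selection the diff below adds, with qid/rx/on standing in for the
caller's arguments:

    /* Sketch: once large VF is negotiated (vf->lv_enabled), queue
     * start/stop must use the V2 opcodes, since a 32-bit queue bitmap
     * cannot address up to 256 queues. */
    if (!vf->lv_enabled)
            err = iavf_switch_queue(adapter, qid, rx, on);
    else
            err = iavf_switch_queue_lv(adapter, qid, rx, on);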
Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h       |   5 ++
 drivers/net/iavf/iavf_rxtx.c  |  25 ++++++-
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 159 insertions(+), 3 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 5e330b215..3d3b0da5d 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -23,6 +23,7 @@
 #define IAVF_MAX_NUM_QUEUES_LV   256
 #define IAVF_CFG_Q_NUM_PER_BUF   32
 #define IAVF_IRQ_MAP_NUM_PER_BUF 128
+#define IAVF_RXTX_QUEUE_CHUNKS_NUM 2

 #define IAVF_NUM_MACADDR_MAX     64

@@ -268,8 +269,12 @@ int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
                       bool rx, bool on);
+int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+                         bool rx, bool on);
 int iavf_enable_queues(struct iavf_adapter *adapter);
+int iavf_enable_queues_lv(struct iavf_adapter *adapter);
 int iavf_disable_queues(struct iavf_adapter *adapter);
+int iavf_disable_queues_lv(struct iavf_adapter *adapter);
 int iavf_configure_rss_lut(struct iavf_adapter *adapter);
 int iavf_configure_rss_key(struct iavf_adapter *adapter);
 int iavf_configure_queues(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 7d4f4ed48..6635f7fd9 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -532,6 +532,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
         struct iavf_adapter *adapter =
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         struct iavf_rx_queue *rxq;
         int err = 0;
@@ -556,7 +557,11 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
         IAVF_WRITE_FLUSH(hw);

         /* Ready to switch the queue on */
-        err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+        if (!vf->lv_enabled)
+                err = iavf_switch_queue(adapter, rx_queue_id, true, true);
+        else
+                err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);
+
         if (err)
                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                             rx_queue_id);
@@ -572,6 +577,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
         struct iavf_adapter *adapter =
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
         struct iavf_tx_queue *txq;
         int err = 0;
@@ -588,7 +594,10 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
         IAVF_WRITE_FLUSH(hw);

         /* Ready to switch the queue on */
-        err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+        if (!vf->lv_enabled)
+                err = iavf_switch_queue(adapter, tx_queue_id, false, true);
+        else
+                err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);

         if (err)
                 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -689,12 +698,22 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 {
         struct iavf_adapter *adapter =
                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
         struct iavf_rx_queue *rxq;
         struct iavf_tx_queue *txq;
         int ret, i;

         /* Stop All queues */
-        ret = iavf_disable_queues(adapter);
+        if (!vf->lv_enabled) {
+                ret = iavf_disable_queues(adapter);
+                if (ret)
+                        PMD_DRV_LOG(WARNING, "Fail to stop queues");
+        } else {
+                ret = iavf_disable_queues_lv(adapter);
+                if (ret)
+                        PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
+        }
+
         if (ret)
                 PMD_DRV_LOG(WARNING, "Fail to stop queues");
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 145a4ff9e..54d9917c0 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -608,6 +608,138 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
         return err;
 }

+int
+iavf_enable_queues_lv(struct iavf_adapter *adapter)
+{
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct virtchnl_del_ena_dis_queues *queue_select;
+        struct virtchnl_queue_chunk *queue_chunk;
+        struct iavf_cmd_info args;
+        int err, len;
+
+        len = sizeof(struct virtchnl_del_ena_dis_queues) +
+              sizeof(struct virtchnl_queue_chunk) *
+              (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+        queue_select = rte_zmalloc("queue_select", len, 0);
+        if (!queue_select)
+                return -ENOMEM;
+
+        queue_chunk = queue_select->chunks.chunks;
+        queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+        queue_select->vport_id = vf->vsi_res->vsi_id;
+
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+                adapter->eth_dev->data->nb_tx_queues;
+
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+                adapter->eth_dev->data->nb_rx_queues;
+
+        args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+        args.in_args = (u8 *)queue_select;
+        args.in_args_size = len;
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+        err = iavf_execute_vf_cmd(adapter, &args);
+        if (err) {
+                PMD_DRV_LOG(ERR,
+                            "Failed to execute command of OP_ENABLE_QUEUES_V2");
+                return err;
+        }
+        return 0;
+}
+
+int
+iavf_disable_queues_lv(struct iavf_adapter *adapter)
+{
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct virtchnl_del_ena_dis_queues *queue_select;
+        struct virtchnl_queue_chunk *queue_chunk;
+        struct iavf_cmd_info args;
+        int err, len;
+
+        len = sizeof(struct virtchnl_del_ena_dis_queues) +
+              sizeof(struct virtchnl_queue_chunk) *
+              (IAVF_RXTX_QUEUE_CHUNKS_NUM - 1);
+        queue_select = rte_zmalloc("queue_select", len, 0);
+        if (!queue_select)
+                return -ENOMEM;
+
+        queue_chunk = queue_select->chunks.chunks;
+        queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM;
+        queue_select->vport_id = vf->vsi_res->vsi_id;
+
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
+                adapter->eth_dev->data->nb_tx_queues;
+
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
+        queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
+                adapter->eth_dev->data->nb_rx_queues;
+
+        args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+        args.in_args = (u8 *)queue_select;
+        args.in_args_size = len;
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+        err = iavf_execute_vf_cmd(adapter, &args);
+        if (err) {
+                PMD_DRV_LOG(ERR,
+                            "Failed to execute command of OP_DISABLE_QUEUES_V2");
+                return err;
+        }
+        return 0;
+}
+
+int
+iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
+                     bool rx, bool on)
+{
+        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+        struct virtchnl_del_ena_dis_queues *queue_select;
+        struct virtchnl_queue_chunk *queue_chunk;
+        struct iavf_cmd_info args;
+        int err, len;
+
+        len = sizeof(struct virtchnl_del_ena_dis_queues);
+        queue_select = rte_zmalloc("queue_select", len, 0);
+        if (!queue_select)
+                return -ENOMEM;
+
+        queue_chunk = queue_select->chunks.chunks;
+        queue_select->chunks.num_chunks = 1;
+        queue_select->vport_id = vf->vsi_res->vsi_id;
+
+        if (rx) {
+                queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX;
+                queue_chunk->start_queue_id = qid;
+                queue_chunk->num_queues = 1;
+        } else {
+                queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX;
+                queue_chunk->start_queue_id = qid;
+                queue_chunk->num_queues = 1;
+        }
+
+        if (on)
+                args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
+        else
+                args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
+        args.in_args = (u8 *)queue_select;
+        args.in_args_size = len;
+        args.out_buffer = vf->aq_resp;
+        args.out_size = IAVF_AQ_BUF_SZ;
+        err = iavf_execute_vf_cmd(adapter, &args);
+        if (err)
+                PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+                            on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
+        return err;
+}
+
 int
 iavf_configure_rss_lut(struct iavf_adapter *adapter)
 {
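Editor's note: taken together, the series is exercised entirely through
the normal ethdev configure/start path. A hypothetical application-side
sketch (port_id and the queue counts are illustrative, not from the
patches):

    /* Requesting more than 16 queue pairs is what triggers the
     * large-VF path in iavf_dev_configure(): capability check, queue
     * request plus VF reset, chunked queue/IRQ configuration, and the
     * V2 enable/disable opcodes at start/stop time. */
    struct rte_eth_conf conf = {0};
    uint16_t nb_rxq = 256, nb_txq = 256; /* > 16 => large VF path */

    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) < 0)
            rte_exit(EXIT_FAILURE, "port configure failed\n");
    /* ... per-queue rte_eth_rx/tx_queue_setup() calls ... */
    if (rte_eth_dev_start(port_id) < 0)
            rte_exit(EXIT_FAILURE, "port start failed\n");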