From patchwork Fri Aug 20 16:28:28 2021
X-Patchwork-Submitter: "Ananyev, Konstantin"
X-Patchwork-Id: 97167
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Konstantin Ananyev
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru,
 qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com,
 techboard@dpdk.org, Konstantin Ananyev
Date: Fri, 20 Aug 2021 17:28:28 +0100
Message-Id: <20210820162834.12544-2-konstantin.ananyev@intel.com>
In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com>
References: <20210820162834.12544-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC 1/7] eth: move ethdev 'burst' API into separate structure

Move public function pointers (rx_pkt_burst(), etc.) from rte_eth_dev
into a separate flat array. We can keep it public to still use inline
functions for 'fast' calls (like rte_eth_rx_burst(), etc.) to
avoid/minimize slowdown.
The intention is to make rte_eth_dev and related structures internal.
That should allow possible future changes to core eth_dev structures
to be transparent to the user and help to avoid ABI/API breakages.

Signed-off-by: Konstantin Ananyev

---
 lib/ethdev/ethdev_private.c  | 74 ++++++++++++++++++++++++++++++++++++
 lib/ethdev/ethdev_private.h  |  3 ++
 lib/ethdev/rte_ethdev.c      | 12 ++++++
 lib/ethdev/rte_ethdev_core.h | 33 ++++++++++++++++
 4 files changed, 122 insertions(+)
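For orientation before the diff: the shape of the dispatch this series
converges on (the Rx side is wired up in patch 2/7) is roughly the
following. This is a condensed sketch, not a literal excerpt. The
application-facing fast-path call stays inline, but the only public symbol
it needs is the flat per-port array, so struct rte_eth_dev itself can
later become opaque:

	/* Per-port slot of public fast-path callbacks (simplified). */
	struct rte_eth_burst_api {
		rte_eth_rx_burst_t rx_pkt_burst;
		rte_eth_tx_burst_t tx_pkt_burst;
		/* ... prepare/status/count callbacks, reserved space ... */
	};
	extern struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS];

	/* The inline wrapper indexes the array by port id; it no longer
	 * dereferences struct rte_eth_dev. */
	static inline uint16_t
	rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
			struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
	{
		return rte_eth_burst_api[port_id].rx_pkt_burst(port_id,
				queue_id, rx_pkts, nb_pkts);
	}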
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 012cf73ca2..1ab64d24cf 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -174,3 +174,77 @@ rte_eth_devargs_parse_representor_ports(char *str, void *data)
 	RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
 	return str == NULL ? -1 : 0;
 }
+
+static uint16_t
+dummy_eth_rx_burst(uint16_t port_id,
+		__rte_unused uint16_t queue_id,
+		__rte_unused struct rte_mbuf **rx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	RTE_LOG(ERR, EAL, "rx_pkt_burst for unconfigured port %u\n", port_id);
+	rte_errno = ENOTSUP;
+	return 0;
+}
+
+static uint16_t
+dummy_eth_tx_burst(uint16_t port_id,
+		__rte_unused uint16_t queue_id,
+		__rte_unused struct rte_mbuf **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	RTE_LOG(ERR, EAL, "tx_pkt_burst for unconfigured port %u\n", port_id);
+	rte_errno = ENOTSUP;
+	return 0;
+}
+
+static uint16_t
+dummy_eth_tx_prepare(uint16_t port_id,
+		__rte_unused uint16_t queue_id,
+		__rte_unused struct rte_mbuf **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	RTE_LOG(ERR, EAL, "tx_pkt_prepare for unconfigured port %u\n", port_id);
+	rte_errno = ENOTSUP;
+	return 0;
+}
+
+static int
+dummy_eth_rx_queue_count(uint16_t port_id,
+		__rte_unused uint16_t queue_id)
+{
+	RTE_LOG(ERR, EAL, "rx_queue_count for unconfigured port %u\n", port_id);
+	return -ENOTSUP;
+}
+
+static int
+dummy_eth_rx_descriptor_status(uint16_t port_id,
+		__rte_unused uint16_t queue_id, __rte_unused uint16_t offset)
+{
+	RTE_LOG(ERR, EAL, "rx_descriptor_status for unconfigured port %u\n",
+		port_id);
+	return -ENOTSUP;
+}
+
+static int
+dummy_eth_tx_descriptor_status(uint16_t port_id,
+		__rte_unused uint16_t queue_id, __rte_unused uint16_t offset)
+{
+	RTE_LOG(ERR, EAL, "tx_descriptor_status for unconfigured port %u\n",
+		port_id);
+	return -ENOTSUP;
+}
+
+void
+rte_eth_dev_burst_api_reset(struct rte_eth_burst_api *rba)
+{
+	static const struct rte_eth_burst_api dummy = {
+		.rx_pkt_burst = dummy_eth_rx_burst,
+		.tx_pkt_burst = dummy_eth_tx_burst,
+		.tx_pkt_prepare = dummy_eth_tx_prepare,
+		.rx_queue_count = dummy_eth_rx_queue_count,
+		.rx_descriptor_status = dummy_eth_rx_descriptor_status,
+		.tx_descriptor_status = dummy_eth_tx_descriptor_status,
+	};
+
+	*rba = dummy;
+}

(Note: the `__rte_unused` markers on the `port_id` parameters were dropped
here, since `port_id` is in fact used by every log message above.)
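Because rte_eth_dev_burst_api_reset() installs these dummies on
configure/close/reset (see below), a burst call on a port that is not set
up now fails softly with a log message instead of jumping through a stale
or NULL pointer. A minimal sketch of what a caller would observe
(illustrative, not part of the patch):

	struct rte_mbuf *pkts[32];
	uint16_t nb;

	rte_errno = 0;
	nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
	if (nb == 0 && rte_errno == ENOTSUP) {
		/* The dummy rx_pkt_burst ran: port not configured yet. */
	}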
diff --git a/lib/ethdev/ethdev_private.h b/lib/ethdev/ethdev_private.h
index 9bb0879538..b9b0e6755a 100644
--- a/lib/ethdev/ethdev_private.h
+++ b/lib/ethdev/ethdev_private.h
@@ -30,6 +30,9 @@ eth_find_device(const struct rte_eth_dev *_start, rte_eth_cmp_t cmp,
 /* Parse devargs value for representor parameter. */
 int rte_eth_devargs_parse_representor_ports(char *str, void *data);
 
+/* reset eth 'burst' API to dummy values */
+void rte_eth_dev_burst_api_reset(struct rte_eth_burst_api *rba);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9d95cd11e1..949292a617 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -44,6 +44,9 @@ static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
 
+/* public 'fast/burst' API */
+struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS];
+
 /* spinlock for eth device callbacks */
 static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -1336,6 +1339,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	int diag;
 	int ret;
 	uint16_t old_mtu;
+	struct rte_eth_burst_api rba;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
@@ -1363,6 +1367,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 */
 	dev->data->dev_configured = 0;
 
+	rba = rte_eth_burst_api[port_id];
+	rte_eth_dev_burst_api_reset(&rte_eth_burst_api[port_id]);
+
 	/* Store original config, as rollback required on failure */
 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
@@ -1623,6 +1630,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	if (old_mtu != dev->data->mtu)
 		dev->data->mtu = old_mtu;
 
+	rte_eth_burst_api[port_id] = rba;
+
 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
 	return ret;
 }
@@ -1871,6 +1880,7 @@ rte_eth_dev_close(uint16_t port_id)
 	dev = &rte_eth_devices[port_id];
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
 
+	rte_eth_dev_burst_api_reset(rte_eth_burst_api + port_id);
 	*lasterr = (*dev->dev_ops->dev_close)(dev);
 	if (*lasterr != 0)
 		lasterr = &binerr;
@@ -1892,6 +1902,8 @@ rte_eth_dev_reset(uint16_t port_id)
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
 
+	rte_eth_dev_burst_api_reset(rte_eth_burst_api + port_id);
+
 	ret = rte_eth_dev_stop(port_id);
 	if (ret != 0) {
 		RTE_ETHDEV_LOG(ERR,
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index edf96de2dc..fb8526cb9f 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -25,21 +25,31 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
 struct rte_eth_dev;
 
+typedef uint16_t (*rte_eth_rx_burst_t)(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 typedef uint16_t (*eth_rx_burst_t)(void *rxq,
 	struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 /**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
 
+typedef uint16_t (*rte_eth_tx_burst_t)(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 typedef uint16_t (*eth_tx_burst_t)(void *txq,
 	struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 /**< @internal Send output packets on a transmit queue of an Ethernet device. */
 
+typedef uint16_t (*rte_eth_tx_prep_t)(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 typedef uint16_t (*eth_tx_prep_t)(void *txq,
 	struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 /**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
+typedef int (*rte_eth_rx_queue_count_t)(uint16_t port_id, uint16_t queue_id);
 typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
 	uint16_t rx_queue_id);
@@ -48,12 +58,35 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
 typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
 /**< @internal Check DD bit of specific RX descriptor */
 
+typedef int (*rte_eth_rx_descriptor_status_t)(uint16_t port_id,
+	uint16_t queue_id, uint16_t offset);
+
 typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
 /**< @internal Check the status of a Rx descriptor */
 
+typedef int (*rte_eth_tx_descriptor_status_t)(uint16_t port_id,
+	uint16_t queue_id, uint16_t offset);
+
 typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
 /**< @internal Check the status of a Tx descriptor */
 
+struct rte_eth_burst_api {
+	rte_eth_rx_burst_t rx_pkt_burst;
+	/**< PMD receive function. */
+	rte_eth_tx_burst_t tx_pkt_burst;
+	/**< PMD transmit function. */
+	rte_eth_tx_prep_t tx_pkt_prepare;
+	/**< PMD transmit prepare function. */
+	rte_eth_rx_queue_count_t rx_queue_count;
+	/**< Get the number of used RX descriptors. */
+	rte_eth_rx_descriptor_status_t rx_descriptor_status;
+	/**< Check the status of a Rx descriptor. */
+	rte_eth_tx_descriptor_status_t tx_descriptor_status;
+	/**< Check the status of a Tx descriptor. */
+	uintptr_t reserved[2];
+} __rte_cache_min_aligned;
+
+extern struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS];
 
 /**
  * @internal
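A note on the layout above: with 8-byte pointers and a 64-byte
RTE_CACHE_LINE_MIN_SIZE, the six callbacks plus the two reserved slots
fill exactly one minimal cache line, which is presumably the point of
__rte_cache_min_aligned: each port's hot pointers sit on their own line,
with room for two future callbacks without an ABI break. A compile-time
check along these lines (my illustration, not in the patch) would lock
that in:

	/* Illustrative only; e.g. somewhere in ethdev init code. */
	RTE_BUILD_BUG_ON(sizeof(struct rte_eth_burst_api) !=
			RTE_CACHE_LINE_MIN_SIZE);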
From patchwork Fri Aug 20 16:28:29 2021
X-Patchwork-Submitter: "Ananyev, Konstantin"
X-Patchwork-Id: 97168
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Konstantin Ananyev
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru,
 qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com,
 techboard@dpdk.org, Konstantin Ananyev
Date: Fri, 20 Aug 2021 17:28:29 +0100
Message-Id: <20210820162834.12544-3-konstantin.ananyev@intel.com>
In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com>
References: <20210820162834.12544-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC 2/7] eth: make drivers use new API for Rx

ethdev:
 - make changes so that drivers can start using the new API for
   rx_pkt_burst().
 - provide helper functions/macros.
 - remove rx_pkt_burst() from 'struct rte_eth_dev'.
drivers/net:
 - adjust to the new rx_burst API.

Signed-off-by: Konstantin Ananyev

---
 app/test/virtual_pmd.c                   |  15 ++-
 drivers/net/i40e/i40e_ethdev.c           |   2 +-
 drivers/net/i40e/i40e_ethdev_vf.c        |   3 +-
 drivers/net/i40e/i40e_rxtx.c             | 161 ++++++++++++++++-------
 drivers/net/i40e/i40e_rxtx.h             |  36 +++--
 drivers/net/i40e/i40e_rxtx_vec_avx2.c    |   7 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c  |   8 +-
 drivers/net/i40e/i40e_rxtx_vec_sse.c     |   8 +-
 drivers/net/i40e/i40e_vf_representor.c   |   5 +-
 drivers/net/ice/ice_dcf_ethdev.c         |   5 +-
 drivers/net/ice/ice_dcf_vf_representor.c |   5 +-
 drivers/net/ice/ice_ethdev.c             |   2 +-
 drivers/net/ice/ice_rxtx.c               | 160 +++++++++++++++-------
 drivers/net/ice/ice_rxtx.h               |  44 +++----
 drivers/net/ice/ice_rxtx_vec_avx2.c      |  16 ++-
 drivers/net/ice/ice_rxtx_vec_avx512.c    |  16 ++-
 drivers/net/ice/ice_rxtx_vec_sse.c       |   8 +-
 lib/ethdev/ethdev_driver.h               | 120 +++++++++++++++++
 lib/ethdev/rte_ethdev.c                  |  23 +++-
 lib/ethdev/rte_ethdev.h                  |  39 +-----
 lib/ethdev/rte_ethdev_core.h             |   9 +-
 lib/ethdev/version.map                   |   5 +
 22 files changed, 483 insertions(+), 214 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed..734ef32c97 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -348,6 +348,8 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
 	return rx_count;
 }
 
+static _RTE_ETH_RX_DEF(virtual_ethdev_rx_burst_success)
+
 static uint16_t
 virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
 		struct rte_mbuf **bufs __rte_unused,
@@ -356,6 +358,8 @@ virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
 	return 0;
 }
 
+static _RTE_ETH_RX_DEF(virtual_ethdev_rx_burst_fail)
+
 static uint16_t
 virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
@@ -425,12 +429,12 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
 void
 virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
 {
-	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
-
 	if (success)
-		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
+		rte_eth_set_rx_burst(port_id,
+			_RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
 	else
-		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
+		rte_eth_set_rx_burst(port_id,
+			_RTE_ETH_FUNC(virtual_ethdev_rx_burst_fail));
 }
 
@@ -599,7 +603,8 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 	pci_dev->device.driver = &pci_drv->driver;
 	eth_dev->device = &pci_dev->device;
 
-	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
+	rte_eth_set_rx_burst(eth_dev->data->port_id,
+		_RTE_ETH_FUNC(virtual_ethdev_rx_burst_success));
 	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
 
 	rte_eth_dev_probing_finish(eth_dev);
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b230e2ed1..4753af126d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1437,7 +1437,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
-	dev->rx_pkt_burst = i40e_recv_pkts;
+	rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
 	dev->tx_pkt_burst = i40e_xmit_pkts;
 	dev->tx_pkt_prepare =
i40e_prep_pkts; diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 0cfe13b7b2..e08e97276a 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1576,7 +1576,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done; eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; - eth_dev->rx_pkt_burst = &i40e_recv_pkts; + rte_eth_set_rx_burst(eth_dev->data->port_id, + _RTE_ETH_FUNC(i40e_recv_pkts)); eth_dev->tx_pkt_burst = &i40e_xmit_pkts; /* diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 026cda948c..f2d0d35538 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -700,7 +700,9 @@ i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue, } #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ -uint16_t +static _RTE_ETH_RX_DEF(i40e_recv_pkts_bulk_alloc) + +static uint16_t i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { struct i40e_rx_queue *rxq; @@ -822,7 +824,9 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return nb_rx; } -uint16_t +_RTE_ETH_RX_DEF(i40e_recv_pkts) + +static uint16_t i40e_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1000,6 +1004,8 @@ i40e_recv_scattered_pkts(void *rx_queue, return nb_rx; } +_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts) + /* Check if the context descriptor is needed for TX offloading */ static inline uint16_t i40e_calc_context_desc(uint64_t flags) @@ -1843,19 +1849,21 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_UNKNOWN }; - if (dev->rx_pkt_burst == i40e_recv_pkts || + rte_eth_rx_burst_t rx_burst = rte_eth_get_rx_burst(dev->data->port_id); + + if (rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts) || #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC - dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || + rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc) || #endif - dev->rx_pkt_burst == i40e_recv_scattered_pkts || - dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || - dev->rx_pkt_burst == i40e_recv_pkts_vec || + rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts) || + rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec) || + rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec) || #ifdef CC_AVX512_SUPPORT - dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 || - dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 || + rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx512) || + rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512) || #endif - dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 || - dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2) + rx_burst == _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx2) || + rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2)) return ptypes; return NULL; } @@ -3265,6 +3273,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); uint16_t rx_using_sse, i; + rte_eth_rx_burst_t rx_burst; + /* In order to allow Vector Rx there are a few configuration * conditions to be met and Rx Bulk Allocation should be allowed. 
*/ @@ -3309,17 +3319,22 @@ i40e_set_rx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - i40e_recv_scattered_pkts_vec_avx512; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + i40e_recv_scattered_pkts_vec_avx512)); + #endif } else { PMD_INIT_LOG(DEBUG, "Using %sVector Scattered Rx (port %d).", ad->rx_use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = ad->rx_use_avx2 ? - i40e_recv_scattered_pkts_vec_avx2 : - i40e_recv_scattered_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + ad->rx_use_avx2 ? + _RTE_ETH_FUNC( + i40e_recv_scattered_pkts_vec_avx2) : + _RTE_ETH_FUNC( + i40e_recv_scattered_pkts_vec)); } } else { if (ad->rx_use_avx512) { @@ -3327,17 +3342,19 @@ i40e_set_rx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - i40e_recv_pkts_vec_avx512; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + i40e_recv_pkts_vec_avx512)); #endif } else { PMD_INIT_LOG(DEBUG, "Using %sVector Rx (port %d).", ad->rx_use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = ad->rx_use_avx2 ? - i40e_recv_pkts_vec_avx2 : - i40e_recv_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + ad->rx_use_avx2 ? + _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2) : + _RTE_ETH_FUNC(i40e_recv_pkts_vec)); } } #else /* RTE_ARCH_X86 */ @@ -3345,11 +3362,13 @@ i40e_set_rx_function(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec)); } else { PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_recv_pkts_vec)); } #endif /* RTE_ARCH_X86 */ } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) { @@ -3358,27 +3377,34 @@ i40e_set_rx_function(struct rte_eth_dev *dev) "will be used on port=%d.", dev->data->port_id); - dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc)); } else { /* Simple Rx Path. */ PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.", dev->data->port_id); - dev->rx_pkt_burst = dev->data->scattered_rx ? - i40e_recv_scattered_pkts : - i40e_recv_pkts; + rte_eth_set_rx_burst(dev->data->port_id, + dev->data->scattered_rx ? + _RTE_ETH_FUNC(i40e_recv_scattered_pkts) : + _RTE_ETH_FUNC(i40e_recv_pkts)); } + rx_burst = rte_eth_get_rx_burst(dev->data->port_id); + /* Propagate information about RX function choice through all queues. 
 	 */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		rx_using_sse =
-			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-			 dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+			(rx_burst == _RTE_ETH_FUNC(
+				i40e_recv_scattered_pkts_vec) ||
+			rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec) ||
 #ifdef CC_AVX512_SUPPORT
-			 dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx512 ||
-			 dev->rx_pkt_burst == i40e_recv_pkts_vec_avx512 ||
+			rx_burst == _RTE_ETH_FUNC(
+				i40e_recv_scattered_pkts_vec_avx512) ||
+			rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512) ||
 #endif
-			 dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
-			 dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+			rx_burst == _RTE_ETH_FUNC(
+				i40e_recv_scattered_pkts_vec_avx2) ||
+			rx_burst == _RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2));
 
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
@@ -3390,27 +3416,66 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 }
 
 static const struct {
-	eth_rx_burst_t pkt_burst;
+	rte_eth_rx_burst_t pkt_burst;
 	const char *info;
 } i40e_rx_burst_infos[] = {
-	{ i40e_recv_scattered_pkts, "Scalar Scattered" },
-	{ i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
-	{ i40e_recv_pkts, "Scalar" },
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts),
+		"Scalar Scattered",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_bulk_alloc),
+		"Scalar Bulk Alloc",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts),
+		"Scalar",
+	},
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-	{ i40e_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
-	{ i40e_recv_pkts_vec_avx512, "Vector AVX512" },
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx512),
+		"Vector AVX512 Scattered"
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_vec_avx512),
+		"Vector AVX512",
+	},
 #endif
-	{ i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
-	{ i40e_recv_pkts_vec_avx2, "Vector AVX2" },
-	{ i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" },
-	{ i40e_recv_pkts_vec, "Vector SSE" },
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec_avx2),
+		"Vector AVX2 Scattered",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_vec_avx2),
+		"Vector AVX2",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+		"Vector SSE Scattered",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_vec),
+		"Vector SSE",
+	},
 #elif defined(RTE_ARCH_ARM64)
-	{ i40e_recv_scattered_pkts_vec, "Vector Neon Scattered" },
-	{ i40e_recv_pkts_vec, "Vector Neon" },
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+		"Vector Neon Scattered",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_vec),
+		"Vector Neon",
+	},
 #elif defined(RTE_ARCH_PPC_64)
-	{ i40e_recv_scattered_pkts_vec, "Vector AltiVec Scattered" },
-	{ i40e_recv_pkts_vec, "Vector AltiVec" },
+	{
+		_RTE_ETH_FUNC(i40e_recv_scattered_pkts_vec),
+		"Vector AltiVec Scattered",
+	},
+	{
+		_RTE_ETH_FUNC(i40e_recv_pkts_vec),
+		"Vector AltiVec",
+	},
 #endif
 };
 
@@ -3418,7 +3483,7 @@ int
 i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
-	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+	rte_eth_rx_burst_t pkt_burst = rte_eth_get_rx_burst(dev->data->port_id);
 	int ret = -EINVAL;
 	unsigned int i;

(Note: the AltiVec entry above originally read `_RTE_ETH_FUNC(i40e_recv_pkts_vec,`
with a missing closing parenthesis, which would not compile; fixed here.)
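For context, the body of i40e_rx_burst_mode_get() (not touched by this
patch; reproduced from memory in simplified form) just matches the current
pointer against the table above. That is why every entry has to switch
from the raw PMD function to its _RTE_ETH_FUNC() wrapper: the wrapper is
now what rte_eth_get_rx_burst() returns:

	for (i = 0; i < RTE_DIM(i40e_rx_burst_infos); ++i) {
		if (pkt_burst == i40e_rx_burst_infos[i].pkt_burst) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 i40e_rx_burst_infos[i].info);
			ret = 0;
			break;
		}
	}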
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5ccf5773e8..beeeaae78d 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -199,12 +199,10 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			    const struct rte_eth_txconf *tx_conf);
 void i40e_dev_rx_queue_release(void *rxq);
 void i40e_dev_tx_queue_release(void *txq);
-uint16_t i40e_recv_pkts(void *rx_queue,
-		struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts(void *rx_queue,
-		struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts);
+
 uint16_t i40e_xmit_pkts(void *tx_queue,
 		struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
@@ -231,11 +229,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
-uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
-		struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec);
+
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
@@ -248,19 +244,17 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
 void i40e_set_tx_function(struct rte_eth_dev *dev);
 void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
 void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
-uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue,
-		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx2);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx2);
+
 uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 int i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
-uint16_t i40e_recv_pkts_vec_avx512(void *rx_queue,
-		struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
-		struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
+
+_RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx512);
+_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx512);
+
 uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 3b9eef91a9..5c03d16644 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -628,13 +628,15 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
 	return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_pkts_vec_avx2)
+
 /*
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -682,7 +684,7 @@ i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
@@ -699,6 +701,7 @@ i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec_avx2)
 
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index
bd21d64223..96ff3d60c3 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c +++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -802,13 +802,15 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, * Notice: * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t i40e_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { return _recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL); } +_RTE_ETH_RX_DEF(i40e_recv_pkts_vec_avx512) + /** * vPMD receive routine that reassembles single burst of 32 scattered packets * Notice: @@ -857,7 +859,7 @@ i40e_recv_scattered_burst_vec_avx512(void *rx_queue, * Notice: * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t i40e_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -876,6 +878,8 @@ i40e_recv_scattered_pkts_vec_avx512(void *rx_queue, rx_pkts + retval, nb_pkts); } +_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec_avx512) + static __rte_always_inline int i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) { diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c index bfa5aff48d..24687984a7 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -598,13 +598,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST * numbers of DD bits */ -uint16_t +static inline uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); } +_RTE_ETH_RX_DEF(i40e_recv_pkts_vec) + /** * vPMD receive routine that reassembles single burst of 32 scattered packets * @@ -651,7 +653,7 @@ i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, /** * vPMD receive routine that reassembles scattered packets. */ -uint16_t +static inline uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -674,6 +676,8 @@ i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts); } +_RTE_ETH_RX_DEF(i40e_recv_scattered_pkts_vec) + static inline void vtx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c index 0481b55381..9d32a5c85d 100644 --- a/drivers/net/i40e/i40e_vf_representor.c +++ b/drivers/net/i40e/i40e_vf_representor.c @@ -466,6 +466,8 @@ i40e_vf_representor_rx_burst(__rte_unused void *rx_queue, return 0; } +static _RTE_ETH_RX_DEF(i40e_vf_representor_rx_burst) + static uint16_t i40e_vf_representor_tx_burst(__rte_unused void *tx_queue, __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) @@ -501,7 +503,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) /* No data-path, but need stub Rx/Tx functions to avoid crash * when testing with the likes of testpmd. 
*/ - ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst; + rte_eth_set_rx_burst(ethdev->data->port_id, + _RTE_ETH_FUNC(i40e_vf_representor_rx_burst)); ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst; vf = &pf->vfs[representor->vf_id]; diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index cab7c4da87..58a4204621 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -40,6 +40,8 @@ ice_dcf_recv_pkts(__rte_unused void *rx_queue, return 0; } +static _RTE_ETH_RX_DEF(ice_dcf_recv_pkts) + static uint16_t ice_dcf_xmit_pkts(__rte_unused void *tx_queue, __rte_unused struct rte_mbuf **bufs, @@ -1039,7 +1041,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; eth_dev->dev_ops = &ice_dcf_eth_dev_ops; - eth_dev->rx_pkt_burst = ice_dcf_recv_pkts; + rte_eth_set_rx_burst(eth_dev->data->port_id, + _RTE_ETH_FUNC(ice_dcf_recv_pkts)); eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts; if (rte_eal_process_type() != RTE_PROC_PRIMARY) diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c index 970461f3e9..8136169ebd 100644 --- a/drivers/net/ice/ice_dcf_vf_representor.c +++ b/drivers/net/ice/ice_dcf_vf_representor.c @@ -18,6 +18,8 @@ ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq, return 0; } +static _RTE_ETH_RX_DEF(ice_dcf_vf_repr_rx_burst) + static uint16_t ice_dcf_vf_repr_tx_burst(__rte_unused void *txq, __rte_unused struct rte_mbuf **tx_pkts, @@ -413,7 +415,8 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param) vf_rep_eth_dev->dev_ops = &ice_dcf_vf_repr_dev_ops; - vf_rep_eth_dev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst; + rte_eth_set_rx_burst(vf_rep_eth_dev->data->port_id, + _RTE_ETH_FUNC(ice_dcf_vf_repr_rx_burst)); vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst; vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index a4cd39c954..4d67a2dddf 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1996,7 +1996,7 @@ ice_dev_init(struct rte_eth_dev *dev) dev->rx_queue_count = ice_rx_queue_count; dev->rx_descriptor_status = ice_rx_descriptor_status; dev->tx_descriptor_status = ice_tx_descriptor_status; - dev->rx_pkt_burst = ice_recv_pkts; + rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts)); dev->tx_pkt_burst = ice_xmit_pkts; dev->tx_pkt_prepare = ice_prep_pkts; diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 5d7ab4f047..2cc411d315 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1749,6 +1749,8 @@ ice_recv_pkts_bulk_alloc(void *rx_queue, return nb_rx; } +static _RTE_ETH_RX_DEF(ice_recv_pkts_bulk_alloc) + static uint16_t ice_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -1917,12 +1919,15 @@ ice_recv_scattered_pkts(void *rx_queue, return nb_rx; } +static _RTE_ETH_RX_DEF(ice_recv_scattered_pkts) + const uint32_t * ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) { struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); const uint32_t *ptypes; + rte_eth_rx_burst_t rx_pkt_burst; static const uint32_t ptypes_os[] = { /* refers to ice_get_default_pkt_type() */ @@ -1988,24 +1993,28 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) else ptypes = ptypes_os; - if (dev->rx_pkt_burst == ice_recv_pkts || - dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc || - dev->rx_pkt_burst == 
ice_recv_scattered_pkts) + rx_pkt_burst = rte_eth_get_rx_burst(dev->data->port_id); + + if (rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts)) return ptypes; #ifdef RTE_ARCH_X86 - if (dev->rx_pkt_burst == ice_recv_pkts_vec || - dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || + if (rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec) || #ifdef CC_AVX512_SUPPORT - dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || - dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload || - dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || - dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512_offload) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512) || + rx_pkt_burst == + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512_offload) || #endif - dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || - dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload || - dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 || - dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload) + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2_offload) || + rx_pkt_burst == _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2) || + rx_pkt_burst == + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2_offload)) return ptypes; #endif @@ -2216,7 +2225,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf) return ICE_SUCCESS; } -uint16_t +static uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -2313,6 +2322,8 @@ ice_recv_pkts(void *rx_queue, return nb_rx; } +_RTE_ETH_RX_DEF(ice_recv_pkts) + static inline void ice_parse_tunneling_params(uint64_t ol_flags, union ice_tx_offload tx_offload, @@ -3107,14 +3118,16 @@ ice_set_rx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_scattered_pkts_vec_avx512_offload; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + ice_recv_scattered_pkts_vec_avx512_offload)); } else { PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_scattered_pkts_vec_avx512; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + ice_recv_scattered_pkts_vec_avx512)); } #endif } else if (ad->rx_use_avx2) { @@ -3122,20 +3135,23 @@ ice_set_rx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_scattered_pkts_vec_avx2_offload; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + ice_recv_scattered_pkts_vec_avx2_offload)); } else { PMD_DRV_LOG(NOTICE, "Using AVX2 Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_scattered_pkts_vec_avx2; + rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC( + ice_recv_scattered_pkts_vec_avx2)); } } else { PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = ice_recv_scattered_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + ice_recv_scattered_pkts_vec)); } } else { if (ad->rx_use_avx512) { @@ -3144,14 +3160,15 @@ ice_set_rx_function(struct rte_eth_dev *dev) 
PMD_DRV_LOG(NOTICE, "Using AVX512 OFFLOAD Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_pkts_vec_avx512_offload; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + ice_recv_pkts_vec_avx512_offload)); } else { PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_pkts_vec_avx512; + rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC( + ice_recv_pkts_vec_avx512)); } #endif } else if (ad->rx_use_avx2) { @@ -3159,20 +3176,21 @@ ice_set_rx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX2 OFFLOAD Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_pkts_vec_avx2_offload; + rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC( + ice_recv_pkts_vec_avx2_offload)); } else { PMD_DRV_LOG(NOTICE, "Using AVX2 Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = - ice_recv_pkts_vec_avx2; + rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC( + ice_recv_pkts_vec_avx2)); } } else { PMD_DRV_LOG(DEBUG, "Using Vector Rx (port %d).", dev->data->port_id); - dev->rx_pkt_burst = ice_recv_pkts_vec; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_recv_pkts_vec)); } } return; @@ -3185,43 +3203,85 @@ ice_set_rx_function(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Using a Scattered function on port %d.", dev->data->port_id); - dev->rx_pkt_burst = ice_recv_scattered_pkts; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_recv_scattered_pkts)); } else if (ad->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. Rx Burst Bulk Alloc function " "will be used on port %d.", dev->data->port_id); - dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc)); } else { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not " "satisfied, Normal Rx will be used on port %d.", dev->data->port_id); - dev->rx_pkt_burst = ice_recv_pkts; + rte_eth_set_rx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_recv_pkts)); } } static const struct { - eth_rx_burst_t pkt_burst; + rte_eth_rx_burst_t pkt_burst; const char *info; } ice_rx_burst_infos[] = { - { ice_recv_scattered_pkts, "Scalar Scattered" }, - { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, - { ice_recv_pkts, "Scalar" }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts), + "Scalar Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_bulk_alloc), + "Scalar Bulk Alloc", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts), + "Scalar", + }, #ifdef RTE_ARCH_X86 #ifdef CC_AVX512_SUPPORT - { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, - { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" }, - { ice_recv_pkts_vec_avx512, "Vector AVX512" }, - { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512), + "Vector AVX512 Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx512_offload), + "Offload Vector AVX512 Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512), + "Vector AVX512", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_vec_avx512_offload), + "Offload Vector AVX512", + }, #endif - { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, - { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" }, - { ice_recv_pkts_vec_avx2, "Vector AVX2" }, - { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" }, - { ice_recv_scattered_pkts_vec, "Vector SSE 
Scattered" }, - { ice_recv_pkts_vec, "Vector SSE" }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2), + "Vector AVX2 Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec_avx2_offload), + "Offload Vector AVX2 Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2), + "Vector AVX2", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_vec_avx2_offload), + "Offload Vector AVX2", + }, + { + _RTE_ETH_FUNC(ice_recv_scattered_pkts_vec), + "Vector SSE Scattered", + }, + { + _RTE_ETH_FUNC(ice_recv_pkts_vec), + "Vector SSE", + }, #endif }; @@ -3229,7 +3289,7 @@ int ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { - eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + rte_eth_rx_burst_t pkt_burst = rte_eth_get_rx_burst(dev->data->port_id); int ret = -EINVAL; unsigned int i; diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index b10db0874d..be8d43a591 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -212,8 +212,7 @@ void ice_tx_queue_release(void *txq); void ice_free_queues(struct rte_eth_dev *dev); int ice_fdir_setup_tx_resources(struct ice_pf *pf); int ice_fdir_setup_rx_resources(struct ice_pf *pf); -uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); +_RTE_ETH_RX_PROTO(ice_recv_pkts); uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); void ice_set_rx_function(struct rte_eth_dev *dev); @@ -242,37 +241,28 @@ int ice_rx_vec_dev_check(struct rte_eth_dev *dev); int ice_tx_vec_dev_check(struct rte_eth_dev *dev); int ice_rxq_vec_setup(struct ice_rx_queue *rxq); int ice_txq_vec_setup(struct ice_tx_queue *txq); -uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); + +_RTE_ETH_RX_PROTO(ice_recv_pkts_vec); +_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec); + uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); -uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); + +_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2); +_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2_offload); +_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2); +_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2_offload); + uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); -uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); -uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); + +_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512); +_RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512_offload); +_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512); 
+_RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512_offload); + uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue, diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c index 9725ac0180..29b9b57f9f 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx2.c +++ b/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -704,7 +704,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -712,7 +712,9 @@ ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts, NULL, false); } -uint16_t +_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx2) + +static inline uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -720,6 +722,8 @@ ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts, NULL, true); } +_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx2_offload) + /** * vPMD receive routine that reassembles single burst of 32 scattered packets * Notice: @@ -787,7 +791,7 @@ ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue, rx_pkts + retval, nb_pkts, offload); } -uint16_t +static inline uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -798,7 +802,9 @@ ice_recv_scattered_pkts_vec_avx2(void *rx_queue, false); } -uint16_t +_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx2) + +static inline uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -809,6 +815,8 @@ ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue, true); } +_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx2_offload) + static __rte_always_inline void ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags, bool offload) diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index 5bba9887d2..30c44c8918 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -819,18 +819,20 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false); } +_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx512) + /** * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -838,6 +840,8 @@ ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts, NULL, true); } +_RTE_ETH_RX_DEF(ice_recv_pkts_vec_avx512_offload) + /** * vPMD receive routine that reassembles single burst of 32 scattered packets * Notice: @@ -927,7 +931,7 @@ ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue, * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -uint16_t +static inline uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -945,13 +949,15 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, rx_pkts + retval, nb_pkts); } 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx512)
+
 /**
  * vPMD receive routine that reassembles scattered packets.
  * Main receive routine that can handle arbitrary burst sizes
  * Notice:
  * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
  */
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
 					   struct rte_mbuf **rx_pkts,
 					   uint16_t nb_pkts)
@@ -971,6 +977,8 @@ ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
 				rx_pkts + retval, nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec_avx512_offload)
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 673e44a243..2caf1c6941 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -587,13 +587,15 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
  *   numbers of DD bits
  */
-uint16_t
+static inline uint16_t
 ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		  uint16_t nb_pkts)
 {
 	return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_pkts_vec)
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  *
@@ -639,7 +641,7 @@ ice_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 /**
  * vPMD receive routine that reassembles scattered packets.
  */
-uint16_t
+static inline uint16_t
 ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts)
 {
@@ -662,6 +664,8 @@ ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 				nb_pkts);
 }
 
+_RTE_ETH_RX_DEF(ice_recv_scattered_pkts_vec)
+
 static inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
 	 uint64_t flags)
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 40e474aa7e..8b7d1e8840 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1513,6 +1513,126 @@ struct rte_eth_tunnel_filter_conf {
 	uint16_t queue_id;      /**< Queue assigned to if match. */
 };
 
+/**
+ * @internal
+ * Helper routine for eth driver rx_burst API.
+ * Should be called as the first thing on entrance to the PMD's
+ * rte_eth_rx_burst implementation.
+ * Does the necessary checks and returns a pointer to the device RX queue.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue from which to retrieve input packets.
+ *
+ * @return
+ *  Pointer to device RX queue structure on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_DEBUG_RX
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+
+	if (queue_id >= dev->data->nb_rx_queues) {
+		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+		return NULL;
+	}
+#endif
+	return dev->data->rx_queues[queue_id];
+}
+
+/**
+ * @internal
+ * Helper routine for eth driver rx_burst API.
+ * Should be called at exit from the PMD's rte_eth_rx_burst implementation.
+ * Does the necessary post-processing - invokes RX callbacks if any, tracing, etc.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue from which to retrieve input packets.
+ * @param rx_pkts
+ *  The address of an array of pointers to *rte_mbuf* structures that
+ *  have been retrieved from the device.
+ * @param nb_rx
+ *  The number of packets that were retrieved from the device.
+ * @param nb_pkts
+ *  The number of elements in the *rx_pkts* array.
+ *
+ * @return
+ *  The number of packets effectively supplied to the *rx_pkts* array.
+ */
+__rte_internal
+static inline uint16_t
+_rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
+	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *dev;
+
+	dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_RXTX_CALLBACKS
+	struct rte_eth_rxtx_callback *cb;
+
+	/* __ATOMIC_RELEASE memory order was used when the
+	 * call back was inserted into the list.
+	 * Since there is a clear dependency between loading
+	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * not required.
+	 */
+	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
+			__ATOMIC_RELAXED);
+
+	if (unlikely(cb != NULL)) {
+		do {
+			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+					nb_pkts, cb->param);
+			cb = cb->next;
+		} while (cb != NULL);
+	}
+#endif
+
+	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
+	return nb_rx;
+}
+
+#define _RTE_ETH_FUNC(fn)	_rte_eth_##fn
+
+/**
+ * @internal
+ * Helper macro to declare the new-style API wrapper for an existing
+ * PMD rx_burst function.
+ */
+#define _RTE_ETH_RX_PROTO(fn) \
+	uint16_t _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \
+			struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+
+/**
+ * @internal
+ * Helper macro to define the new-style API wrapper around an existing
+ * PMD rx_burst function.
+ */
+#define _RTE_ETH_RX_DEF(fn) \
+_RTE_ETH_RX_PROTO(fn) \
+{ \
+	uint16_t nb_rx; \
+	void *rxq = _rte_eth_rx_prolog(port_id, queue_id); \
+	if (rxq == NULL) \
+		return 0; \
+	nb_rx = fn(rxq, rx_pkts, nb_pkts); \
+	return _rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx, nb_pkts); \
+}
+
+__rte_experimental
+rte_eth_rx_burst_t rte_eth_get_rx_burst(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf);
+
 #ifdef __cplusplus
 }
 #endif

(Note: the second parameter documented above was originally a duplicated
`@param nb_pkts`; per the function signature it describes `nb_rx`.)
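Putting the helpers together, a PMD converted to this scheme follows a
pattern like the one below (the driver name is hypothetical; the real
conversions are in the i40e/ice hunks of this patch):

	/* The existing queue-based burst function keeps its signature. */
	static uint16_t
	mypmd_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	{
		/* ... fill rx_pkts[] from the HW queue ... */
		return 0;
	}

	/* Generates uint16_t _rte_eth_mypmd_recv_pkts(port_id, queue_id, ...)
	 * which brackets the call with _rte_eth_rx_prolog()/_rte_eth_rx_epilog(). */
	_RTE_ETH_RX_DEF(mypmd_recv_pkts)

	/* At device init the wrapper is published in the flat array: */
	rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(mypmd_recv_pkts));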
Parameters: int port_id"); } + +__rte_experimental +rte_eth_rx_burst_t +rte_eth_get_rx_burst(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].rx_pkt_burst; +} + +__rte_experimental +int +rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].rx_pkt_burst = rxf; + return 0; +} diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index d2b27c351f..a155f255ad 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -4981,44 +4981,11 @@ static inline uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - uint16_t nb_rx; - -#ifdef RTE_ETHDEV_DEBUG_RX - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); - RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0); - - if (queue_id >= dev->data->nb_rx_queues) { - RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); + if (port_id >= RTE_MAX_ETHPORTS) return 0; - } -#endif - nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], - rx_pkts, nb_pkts); - -#ifdef RTE_ETHDEV_RXTX_CALLBACKS - struct rte_eth_rxtx_callback *cb; - - /* __ATOMIC_RELEASE memory order was used when the - * call back was inserted into the list. - * Since there is a clear dependency between loading - * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is - * not required. - */ - cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id], - __ATOMIC_RELAXED); - - if (unlikely(cb != NULL)) { - do { - nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx, - nb_pkts, cb->param); - cb = cb->next; - } while (cb != NULL); - } -#endif - rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx); - return nb_rx; + return rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id, + rx_pkts, nb_pkts); } /** diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index fb8526cb9f..94ffa071e3 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -25,12 +25,14 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback); struct rte_eth_dev; +/* !!! should be removed *** */ +typedef uint16_t (*eth_rx_burst_t)(void *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + typedef uint16_t (*rte_eth_rx_burst_t)(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); -typedef uint16_t (*eth_rx_burst_t)(void *rxq, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts); /**< @internal Retrieve input packets from a receive queue of an Ethernet device. */ typedef uint16_t (*rte_eth_tx_burst_t)(uint16_t port_id, uint16_t queue_id, @@ -113,7 +115,6 @@ struct rte_eth_rxtx_callback { * process, while the actual configuration data for the device is shared. */ struct rte_eth_dev { - eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */ eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */ eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. 
*/ diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index 3eece75b72..2698c75940 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -249,6 +249,11 @@ EXPERIMENTAL { rte_mtr_meter_policy_delete; rte_mtr_meter_policy_update; rte_mtr_meter_policy_validate; + + # added in 21.11 + rte_eth_burst_api; + rte_eth_get_rx_burst; + rte_eth_set_rx_burst; }; INTERNAL { From patchwork Fri Aug 20 16:28:30 2021 X-Patchwork-Submitter: "Ananyev, Konstantin" X-Patchwork-Id: 97169 From: Konstantin Ananyev To: dev@dpdk.org Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com, techboard@dpdk.org, Konstantin Ananyev Date: Fri, 20 Aug 2021 17:28:30 +0100 Message-Id: <20210820162834.12544-4-konstantin.ananyev@intel.com> In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com> References: <20210820162834.12544-1-konstantin.ananyev@intel.com> Subject: [dpdk-dev] [RFC 3/7] eth: make drivers use new API for Tx ethdev: - make changes so drivers can start using the new API for tx_pkt_burst(). - provide helper functions/macros. - remove tx_pkt_burst() from 'struct rte_eth_dev'. drivers/net: - adjust to the new tx_burst API.
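For illustration, the driver-side conversion follows a single pattern throughout this patch. Below is a minimal sketch of what a PMD looks like after the change, written against the helpers this patch adds (_RTE_ETH_TX_DEF, _RTE_ETH_FUNC, rte_eth_set_tx_burst); the mydrv_* names are hypothetical stand-ins for a driver's own symbols, and the snippet assumes it sits in a PMD source file that already includes ethdev_driver.h:

static uint16_t
mydrv_xmit_pkts(__rte_unused void *tx_queue,
	__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	/* a real PMD would post tx_pkts to its HW queue here */
	return nb_pkts;
}

/* Generates uint16_t _rte_eth_mydrv_xmit_pkts(port_id, queue_id, tx_pkts,
 * nb_pkts): it runs _rte_eth_tx_prolog() (checks, TX callbacks, tracing)
 * and then calls mydrv_xmit_pkts() on the resolved queue pointer.
 */
static _RTE_ETH_TX_DEF(mydrv_xmit_pkts)

static int
mydrv_dev_init(struct rte_eth_dev *dev)
{
	/* register the wrapper in the flat rte_eth_burst_api[] array
	 * instead of writing dev->tx_pkt_burst directly
	 */
	return rte_eth_set_tx_burst(dev->data->port_id,
			_RTE_ETH_FUNC(mydrv_xmit_pkts));
}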
Signed-off-by: Konstantin Ananyev --- app/test/virtual_pmd.c | 12 ++- drivers/net/i40e/i40e_ethdev.c | 2 +- drivers/net/i40e/i40e_ethdev_vf.c | 3 +- drivers/net/i40e/i40e_rxtx.c | 56 ++++++++------ drivers/net/i40e/i40e_rxtx.h | 16 ++-- drivers/net/i40e/i40e_rxtx_vec_avx2.c | 4 +- drivers/net/i40e/i40e_rxtx_vec_avx512.c | 4 +- drivers/net/i40e/i40e_vf_representor.c | 5 +- drivers/net/ice/ice_dcf_ethdev.c | 5 +- drivers/net/ice/ice_dcf_vf_representor.c | 5 +- drivers/net/ice/ice_ethdev.c | 2 +- drivers/net/ice/ice_rxtx.c | 47 +++++++----- drivers/net/ice/ice_rxtx.h | 20 ++--- drivers/net/ice/ice_rxtx_vec_avx2.c | 8 +- drivers/net/ice/ice_rxtx_vec_avx512.c | 8 +- drivers/net/ice/ice_rxtx_vec_common.h | 7 +- drivers/net/ice/ice_rxtx_vec_sse.c | 4 +- lib/ethdev/ethdev_driver.h | 94 ++++++++++++++++++++++++ lib/ethdev/rte_ethdev.c | 23 +++++- lib/ethdev/rte_ethdev.h | 37 +--------- lib/ethdev/rte_ethdev_core.h | 1 - lib/ethdev/version.map | 2 + 22 files changed, 247 insertions(+), 118 deletions(-) diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index 734ef32c97..940b2af1ab 100644 --- a/app/test/virtual_pmd.c +++ b/app/test/virtual_pmd.c @@ -390,6 +390,8 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs, return nb_pkts; } +static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_success) + static uint16_t virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) @@ -425,6 +427,7 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs, return 0; } +static _RTE_ETH_TX_DEF(virtual_ethdev_tx_burst_fail) void virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success) @@ -447,9 +450,11 @@ virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success) dev_private = vrtl_eth_dev->data->dev_private; if (success) - vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success; + rte_eth_set_tx_burst(port_id, + _RTE_ETH_FUNC(virtual_ethdev_tx_burst_success)); else - vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail; + rte_eth_set_tx_burst(port_id, + _RTE_ETH_FUNC(virtual_ethdev_tx_burst_fail)); dev_private->tx_burst_fail_count = 0; } @@ -605,7 +610,8 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr, rte_eth_set_rx_burst(eth_dev->data->port_id, _RTE_ETH_FUNC(virtual_ethdev_rx_burst_success)); - eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success; + rte_eth_set_tx_burst(eth_dev->data->port_id, + _RTE_ETH_FUNC(virtual_ethdev_tx_burst_success)); rte_eth_dev_probing_finish(eth_dev); diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 4753af126d..9eb9129ae9 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1438,7 +1438,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts)); - dev->tx_pkt_burst = i40e_xmit_pkts; + rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts)); dev->tx_pkt_prepare = i40e_prep_pkts; /* for secondary processes, we don't initialise any further as primary diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index e08e97276a..3755bdb66a 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1578,7 +1578,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; 
rte_eth_set_rx_burst(eth_dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts)); - eth_dev->tx_pkt_burst = &i40e_xmit_pkts; + rte_eth_set_tx_burst(eth_dev->data->port_id, + _RTE_ETH_FUNC(i40e_xmit_pkts)); /* * For secondary processes, we don't initialise any further as primary diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index f2d0d35538..5a400435dd 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -1067,7 +1067,7 @@ i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt) return count; } -uint16_t +static inline uint16_t i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct i40e_tx_queue *txq; @@ -1315,6 +1315,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_tx; } +_RTE_ETH_TX_DEF(i40e_xmit_pkts) + static __rte_always_inline int i40e_tx_free_bufs(struct i40e_tx_queue *txq) { @@ -1509,6 +1511,8 @@ i40e_xmit_pkts_simple(void *tx_queue, return nb_tx; } +static _RTE_ETH_TX_DEF(i40e_xmit_pkts_simple) + static uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -1531,6 +1535,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } +static _RTE_ETH_TX_DEF(i40e_xmit_pkts_vec) + /********************************************************************* * * TX simple prep functions @@ -2608,7 +2614,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq) void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) { - struct rte_eth_dev *dev; + rte_eth_tx_burst_t tx_pkt_burst; uint16_t i; if (!txq || !txq->sw_ring) { @@ -2616,14 +2622,14 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) return; } - dev = &rte_eth_devices[txq->port_id]; + tx_pkt_burst = rte_eth_get_tx_burst(txq->port_id); /** * vPMD tx will not set sw_ring's mbuf to NULL after free, * so need to free remains more carefully. */ #ifdef CC_AVX512_SUPPORT - if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx512) { + if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512)) { struct i40e_vec_tx_entry *swr = (void *)txq->sw_ring; i = txq->tx_next_dd - txq->tx_rs_thresh + 1; @@ -2641,8 +2647,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) return; } #endif - if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 || - dev->tx_pkt_burst == i40e_xmit_pkts_vec) { + if (tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) || + tx_pkt_burst == _RTE_ETH_FUNC(i40e_xmit_pkts_vec)) { i = txq->tx_next_dd - txq->tx_rs_thresh + 1; if (txq->tx_tail < i) { for (; i < txq->nb_tx_desc; i++) { @@ -3564,49 +3570,55 @@ i40e_set_tx_function(struct rte_eth_dev *dev) #ifdef CC_AVX512_SUPPORT PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", dev->data->port_id); - dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC( + i40e_xmit_pkts_vec_avx512)); #endif } else { PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).", ad->tx_use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->tx_pkt_burst = ad->tx_use_avx2 ? - i40e_xmit_pkts_vec_avx2 : - i40e_xmit_pkts_vec; + rte_eth_set_tx_burst(dev->data->port_id, + ad->tx_use_avx2 ? 
+ _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2) : + _RTE_ETH_FUNC(i40e_xmit_pkts_vec)); } #else /* RTE_ARCH_X86 */ PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).", dev->data->port_id); - dev->tx_pkt_burst = i40e_xmit_pkts_vec; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_xmit_pkts_vec)); #endif /* RTE_ARCH_X86 */ } else { PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); - dev->tx_pkt_burst = i40e_xmit_pkts_simple; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_xmit_pkts_simple)); } dev->tx_pkt_prepare = i40e_simple_prep_pkts; } else { PMD_INIT_LOG(DEBUG, "Xmit tx finally be used."); - dev->tx_pkt_burst = i40e_xmit_pkts; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(i40e_xmit_pkts)); dev->tx_pkt_prepare = i40e_prep_pkts; } } static const struct { - eth_tx_burst_t pkt_burst; + rte_eth_tx_burst_t pkt_burst; const char *info; } i40e_tx_burst_infos[] = { - { i40e_xmit_pkts_simple, "Scalar Simple" }, - { i40e_xmit_pkts, "Scalar" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_simple), "Scalar Simple" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts), "Scalar" }, #ifdef RTE_ARCH_X86 #ifdef CC_AVX512_SUPPORT - { i40e_xmit_pkts_vec_avx512, "Vector AVX512" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx512), "Vector AVX512" }, #endif - { i40e_xmit_pkts_vec_avx2, "Vector AVX2" }, - { i40e_xmit_pkts_vec, "Vector SSE" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_vec_avx2), "Vector AVX2" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector SSE" }, #elif defined(RTE_ARCH_ARM64) - { i40e_xmit_pkts_vec, "Vector Neon" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector Neon" }, #elif defined(RTE_ARCH_PPC_64) - { i40e_xmit_pkts_vec, "Vector AltiVec" }, + { _RTE_ETH_FUNC(i40e_xmit_pkts_vec), "Vector AltiVec" }, #endif }; @@ -3614,7 +3626,7 @@ int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { - eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id); int ret = -EINVAL; unsigned int i; diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index beeeaae78d..c51d5db2f7 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -203,9 +203,7 @@ void i40e_dev_tx_queue_release(void *txq); _RTE_ETH_RX_PROTO(i40e_recv_pkts); _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts); -uint16_t i40e_xmit_pkts(void *tx_queue, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(i40e_xmit_pkts); uint16_t i40e_simple_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, @@ -236,8 +234,10 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev); int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq); int i40e_txq_vec_setup(struct i40e_tx_queue *txq); void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq); + uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); + uint16_t nb_pkts); + void i40e_set_rx_function(struct rte_eth_dev *dev); void i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq); @@ -248,16 +248,14 @@ void i40e_set_default_pctype_table(struct rte_eth_dev *dev); _RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx2); _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx2); -uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx2); + int i40e_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond 
*pmc); _RTE_ETH_RX_PROTO(i40e_recv_pkts_vec_avx512); _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec_avx512); -uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(i40e_xmit_pkts_vec_avx512); /* For each value it means, datasheet of hardware can tell more details * diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c index 5c03d16644..f011088ad7 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c +++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c @@ -824,7 +824,7 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_pkts; } -uint16_t +static inline uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -845,3 +845,5 @@ i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } + +_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx2) diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c index 96ff3d60c3..e37dc5a401 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c +++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -1120,7 +1120,7 @@ i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_pkts; } -uint16_t +static inline uint16_t i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1141,3 +1141,5 @@ i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } + +_RTE_ETH_TX_DEF(i40e_xmit_pkts_vec_avx512) diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c index 9d32a5c85d..f488ef51cd 100644 --- a/drivers/net/i40e/i40e_vf_representor.c +++ b/drivers/net/i40e/i40e_vf_representor.c @@ -475,6 +475,8 @@ i40e_vf_representor_tx_burst(__rte_unused void *tx_queue, return 0; } +static _RTE_ETH_TX_DEF(i40e_vf_representor_tx_burst) + int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) { @@ -505,7 +507,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) */ rte_eth_set_rx_burst(ethdev->data->port_id, _RTE_ETH_FUNC(i40e_vf_representor_rx_burst)); - ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst; + rte_eth_set_tx_burst(ethdev->data->port_id, + _RTE_ETH_FUNC(i40e_vf_representor_tx_burst)); vf = &pf->vfs[representor->vf_id]; diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 58a4204621..f9a917a13f 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -50,6 +50,8 @@ ice_dcf_xmit_pkts(__rte_unused void *tx_queue, return 0; } +static _RTE_ETH_TX_DEF(ice_dcf_xmit_pkts) + static int ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq) { @@ -1043,7 +1045,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &ice_dcf_eth_dev_ops; rte_eth_set_rx_burst(eth_dev->data->port_id, _RTE_ETH_FUNC(ice_dcf_recv_pkts)); - eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts; + rte_eth_set_tx_burst(eth_dev->data->port_id, + _RTE_ETH_FUNC(ice_dcf_xmit_pkts)); if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c index 8136169ebd..8b46c9614a 100644 --- a/drivers/net/ice/ice_dcf_vf_representor.c +++ b/drivers/net/ice/ice_dcf_vf_representor.c @@ -28,6 +28,8 @@ ice_dcf_vf_repr_tx_burst(__rte_unused void *txq, return 0; } +static _RTE_ETH_TX_DEF(ice_dcf_vf_repr_tx_burst) + static int ice_dcf_vf_repr_dev_configure(struct rte_eth_dev *dev) { 
@@ -417,7 +419,8 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param) rte_eth_set_rx_burst(vf_rep_eth_dev->data->port_id, _RTE_ETH_FUNC(ice_dcf_vf_repr_rx_burst)); - vf_rep_eth_dev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst; + rte_eth_set_tx_burst(vf_rep_eth_dev->data->port_id, + _RTE_ETH_FUNC(ice_dcf_vf_repr_tx_burst)); vf_rep_eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; vf_rep_eth_dev->data->representor_id = repr->vf_id; diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 4d67a2dddf..9558455f7f 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1997,7 +1997,7 @@ ice_dev_init(struct rte_eth_dev *dev) dev->rx_descriptor_status = ice_rx_descriptor_status; dev->tx_descriptor_status = ice_tx_descriptor_status; rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts)); - dev->tx_pkt_burst = ice_xmit_pkts; + rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts)); dev->tx_pkt_prepare = ice_prep_pkts; /* for secondary processes, we don't initialise any further as primary diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 2cc411d315..e97564fdd6 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -2558,7 +2558,7 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt) return count; } -uint16_t +static inline uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct ice_tx_queue *txq; @@ -2775,6 +2775,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_tx; } +_RTE_ETH_TX_DEF(ice_xmit_pkts) + static __rte_always_inline int ice_tx_free_bufs(struct ice_tx_queue *txq) { @@ -3064,6 +3066,8 @@ ice_xmit_pkts_simple(void *tx_queue, return nb_tx; } +static _RTE_ETH_TX_DEF(ice_xmit_pkts_simple) + void __rte_cold ice_set_rx_function(struct rte_eth_dev *dev) { @@ -3433,14 +3437,15 @@ ice_set_tx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX512 OFFLOAD Vector Tx (port %d).", dev->data->port_id); - dev->tx_pkt_burst = - ice_xmit_pkts_vec_avx512_offload; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload)); dev->tx_pkt_prepare = ice_prep_pkts; } else { PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", dev->data->port_id); - dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512)); } #endif } else { @@ -3448,16 +3453,17 @@ ice_set_tx_function(struct rte_eth_dev *dev) PMD_DRV_LOG(NOTICE, "Using AVX2 OFFLOAD Vector Tx (port %d).", dev->data->port_id); - dev->tx_pkt_burst = - ice_xmit_pkts_vec_avx2_offload; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2_offload)); dev->tx_pkt_prepare = ice_prep_pkts; } else { PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", ad->tx_use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->tx_pkt_burst = ad->tx_use_avx2 ? - ice_xmit_pkts_vec_avx2 : - ice_xmit_pkts_vec; + rte_eth_set_tx_burst(dev->data->port_id, + ad->tx_use_avx2 ? 
+ _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2) : + _RTE_ETH_FUNC(ice_xmit_pkts_vec)); } } @@ -3467,28 +3473,31 @@ ice_set_tx_function(struct rte_eth_dev *dev) if (ad->tx_simple_allowed) { PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); - dev->tx_pkt_burst = ice_xmit_pkts_simple; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_xmit_pkts_simple)); dev->tx_pkt_prepare = NULL; } else { PMD_INIT_LOG(DEBUG, "Normal tx finally be used."); - dev->tx_pkt_burst = ice_xmit_pkts; + rte_eth_set_tx_burst(dev->data->port_id, + _RTE_ETH_FUNC(ice_xmit_pkts)); dev->tx_pkt_prepare = ice_prep_pkts; } } static const struct { - eth_tx_burst_t pkt_burst; + rte_eth_tx_burst_t pkt_burst; const char *info; } ice_tx_burst_infos[] = { - { ice_xmit_pkts_simple, "Scalar Simple" }, - { ice_xmit_pkts, "Scalar" }, + { _RTE_ETH_FUNC(ice_xmit_pkts_simple), "Scalar Simple" }, + { _RTE_ETH_FUNC(ice_xmit_pkts), "Scalar" }, #ifdef RTE_ARCH_X86 #ifdef CC_AVX512_SUPPORT - { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, - { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" }, + { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512), "Vector AVX512" }, + { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload), + "Offload Vector AVX512" }, #endif - { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, - { ice_xmit_pkts_vec, "Vector SSE" }, + { _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2), "Vector AVX2" }, + { _RTE_ETH_FUNC(ice_xmit_pkts_vec), "Vector SSE" }, #endif }; @@ -3496,7 +3505,7 @@ int ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { - eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; + rte_eth_tx_burst_t pkt_burst = rte_eth_get_tx_burst(dev->data->port_id); int ret = -EINVAL; unsigned int i; diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index be8d43a591..3c06406204 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -213,8 +213,7 @@ void ice_free_queues(struct rte_eth_dev *dev); int ice_fdir_setup_tx_resources(struct ice_pf *pf); int ice_fdir_setup_rx_resources(struct ice_pf *pf); _RTE_ETH_RX_PROTO(ice_recv_pkts); -uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(ice_xmit_pkts); void ice_set_rx_function(struct rte_eth_dev *dev); uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); @@ -245,29 +244,24 @@ int ice_txq_vec_setup(struct ice_tx_queue *txq); _RTE_ETH_RX_PROTO(ice_recv_pkts_vec); _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec); -uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec); _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2); _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx2_offload); _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2); _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx2_offload); -uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2); +_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx2_offload); _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512); _RTE_ETH_RX_PROTO(ice_recv_pkts_vec_avx512_offload); _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512); _RTE_ETH_RX_PROTO(ice_recv_scattered_pkts_vec_avx512_offload); -uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue, - struct 
rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512); +_RTE_ETH_TX_PROTO(ice_xmit_pkts_vec_avx512_offload); + int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc); diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c index 29b9b57f9f..a15a673767 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx2.c +++ b/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -985,16 +985,20 @@ ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -uint16_t +static inline uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false); } -uint16_t +_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2) + +static inline uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true); } + +_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx2_offload) diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index 30c44c8918..d2fdd64cf8 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -1235,7 +1235,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_pkts; } -uint16_t +static inline uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1257,7 +1257,9 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } -uint16_t +_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512) + +static inline uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1279,3 +1281,5 @@ ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } + +_RTE_ETH_TX_DEF(ice_xmit_pkts_vec_avx512_offload) diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h index 2d8ef7dc8a..f7604f960b 100644 --- a/drivers/net/ice/ice_rxtx_vec_common.h +++ b/drivers/net/ice/ice_rxtx_vec_common.h @@ -195,10 +195,11 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq) i = txq->tx_next_dd - txq->tx_rs_thresh + 1; #ifdef CC_AVX512_SUPPORT - struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id]; + rte_eth_tx_burst_t tx_pkt_burst = + rte_eth_get_tx_burst(txq->vsi->adapter->pf.dev_data->port_id); - if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 || - dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) { + if (tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512) || + tx_pkt_burst == _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload)) { struct ice_vec_tx_entry *swr = (void *)txq->sw_ring; if (txq->tx_tail < i) { diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c index 2caf1c6941..344bd11508 100644 --- a/drivers/net/ice/ice_rxtx_vec_sse.c +++ b/drivers/net/ice/ice_rxtx_vec_sse.c @@ -758,7 +758,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_pkts; } -uint16_t +static inline uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -779,6 +779,8 @@ ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } +_RTE_ETH_TX_DEF(ice_xmit_pkts_vec) + int __rte_cold ice_rxq_vec_setup(struct ice_rx_queue 
*rxq) { diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index 8b7d1e8840..45d1160465 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -1633,6 +1633,100 @@ rte_eth_rx_burst_t rte_eth_get_rx_burst(uint16_t port_id); __rte_experimental int rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf); +/** + * @internal + * Helper routine for eth driver tx_burst API. + * Should be called as the first thing on entrance to the PMD's rte_eth_tx_burst + * implementation. + * Does necessary checks and pre-processing - invokes TX callbacks if any, + * tracing, etc. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the transmit queue. + * @param tx_pkts + * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures + * which contain the output packets. + * @param nb_pkts + * The pointer to the maximum number of packets to transmit. + * + * @return + * Pointer to device TX queue structure on success or NULL otherwise. + */ +__rte_internal +static inline void * +_rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t *nb_pkts) +{ + uint16_t n; + struct rte_eth_dev *dev; + + n = *nb_pkts; + dev = &rte_eth_devices[port_id]; + +#ifdef RTE_ETHDEV_DEBUG_TX + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); + + if (queue_id >= dev->data->nb_tx_queues) { + RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); + return NULL; + } +#endif + +#ifdef RTE_ETHDEV_RXTX_CALLBACKS + struct rte_eth_rxtx_callback *cb; + + /* __ATOMIC_RELEASE memory order was used when the + * call back was inserted into the list. + * Since there is a clear dependency between loading + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is + * not required. + */ + cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id], + __ATOMIC_RELAXED); + + if (unlikely(cb != NULL)) { + do { + n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param); + cb = cb->next; + } while (cb != NULL); + } + + *nb_pkts = n; +#endif + + rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n); + return dev->data->tx_queues[queue_id]; +} + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD tx_burst functions. + */ +#define _RTE_ETH_TX_PROTO(fn) \ + uint16_t _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \ + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD tx_burst functions. 
+ */ +#define _RTE_ETH_TX_DEF(fn) \ +_RTE_ETH_TX_PROTO(fn) \ +{ \ + void *txq = _rte_eth_tx_prolog(port_id, queue_id, tx_pkts, &nb_pkts); \ + if (txq == NULL) \ + return 0; \ + return fn(txq, tx_pkts, nb_pkts); \ +} + +__rte_experimental +rte_eth_tx_burst_t rte_eth_get_tx_burst(uint16_t port_id); + +__rte_experimental +int rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf); + #ifdef __cplusplus } #endif diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index c126626281..1165e0bb32 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) eth_dev->device = NULL; eth_dev->process_private = NULL; eth_dev->intr_handle = NULL; - eth_dev->tx_pkt_burst = NULL; eth_dev->tx_pkt_prepare = NULL; eth_dev->rx_queue_count = NULL; eth_dev->rx_descriptor_done = NULL; @@ -6358,3 +6357,25 @@ rte_eth_set_rx_burst(uint16_t port_id, rte_eth_rx_burst_t rxf) rte_eth_burst_api[port_id].rx_pkt_burst = rxf; return 0; } + +__rte_experimental +rte_eth_tx_burst_t +rte_eth_get_tx_burst(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].tx_pkt_burst; +} + +__rte_experimental +int +rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].tx_pkt_burst = txf; + return 0; +} diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index a155f255ad..3eac61a289 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -5226,42 +5226,11 @@ static inline uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - -#ifdef RTE_ETHDEV_DEBUG_TX - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); - RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0); - - if (queue_id >= dev->data->nb_tx_queues) { - RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); + if (port_id >= RTE_MAX_ETHPORTS) return 0; - } -#endif - -#ifdef RTE_ETHDEV_RXTX_CALLBACKS - struct rte_eth_rxtx_callback *cb; - /* __ATOMIC_RELEASE memory order was used when the - * call back was inserted into the list. - * Since there is a clear dependency between loading - * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is - * not required. - */ - cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id], - __ATOMIC_RELAXED); - - if (unlikely(cb != NULL)) { - do { - nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts, - cb->param); - cb = cb->next; - } while (cb != NULL); - } -#endif - - rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, - nb_pkts); - return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts); + return rte_eth_burst_api[port_id].tx_pkt_burst(port_id, queue_id, + tx_pkts, nb_pkts); } /** diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 94ffa071e3..ace77db1b6 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -115,7 +115,6 @@ struct rte_eth_rxtx_callback { * process, while the actual configuration data for the device is shared. */ struct rte_eth_dev { - eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */ eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */ eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. 
*/ diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index 2698c75940..8f8a6b4a5a 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -253,7 +253,9 @@ EXPERIMENTAL { # added in 21.11 rte_eth_burst_api; rte_eth_get_rx_burst; + rte_eth_get_tx_burst; rte_eth_set_rx_burst; + rte_eth_set_tx_burst; }; INTERNAL { From patchwork Fri Aug 20 16:28:31 2021 X-Patchwork-Submitter: "Ananyev, Konstantin" X-Patchwork-Id: 97170 From: Konstantin Ananyev To: dev@dpdk.org Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com, techboard@dpdk.org, Konstantin Ananyev Date: Fri, 20 Aug 2021 17:28:31 +0100 Message-Id: <20210820162834.12544-5-konstantin.ananyev@intel.com> In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com> References: <20210820162834.12544-1-konstantin.ananyev@intel.com> Subject: [dpdk-dev] [RFC 4/7] eth: make drivers use new API for Tx prepare ethdev: - make changes so drivers can start using the new API for tx_pkt_prepare(). - provide helper functions/macros. - remove tx_pkt_prepare() from 'struct rte_eth_dev'. drivers/net: - adjust to the new tx_prepare API.
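Again for illustration, a minimal sketch of the driver-side pattern, using the _RTE_ETH_TX_PREP_DEF helper added below; the mydrv_* names are hypothetical, and the snippet assumes a PMD source file that already includes ethdev_driver.h. Note that registering NULL is meaningful: rte_eth_tx_prepare() then simply returns nb_pkts, which is what the paths with no prepare requirements rely on:

static uint16_t
mydrv_prep_pkts(__rte_unused void *tx_queue,
	__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	/* a real PMD would validate offload flags / fix up headers here */
	return nb_pkts;
}

/* Generates _rte_eth_mydrv_prep_pkts(port_id, queue_id, tx_pkts, nb_pkts),
 * built on top of _rte_eth_tx_prep_prolog().
 */
static _RTE_ETH_TX_PREP_DEF(mydrv_prep_pkts)

static void
mydrv_select_tx_path(struct rte_eth_dev *dev, int needs_prepare)
{
	/* NULL keeps rte_eth_tx_prepare() a no-op for this port */
	rte_eth_set_tx_prep(dev->data->port_id, needs_prepare ?
			_RTE_ETH_FUNC(mydrv_prep_pkts) : NULL);
}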
Signed-off-by: Konstantin Ananyev --- drivers/net/i40e/i40e_ethdev.c | 2 +- drivers/net/i40e/i40e_rxtx.c | 14 +++++--- drivers/net/i40e/i40e_rxtx.h | 7 ++-- drivers/net/ice/ice_ethdev.c | 2 +- drivers/net/ice/ice_rxtx.c | 17 ++++++---- drivers/net/ice/ice_rxtx.h | 3 +- lib/ethdev/ethdev_driver.h | 62 ++++++++++++++++++++++++++++++++++ lib/ethdev/rte_ethdev.c | 23 ++++++++++++- lib/ethdev/rte_ethdev.h | 23 ++----------- lib/ethdev/rte_ethdev_core.h | 2 -- lib/ethdev/version.map | 2 ++ 11 files changed, 116 insertions(+), 41 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 9eb9129ae9..bd6408da90 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1439,7 +1439,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts)); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts)); - dev->tx_pkt_prepare = i40e_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(i40e_prep_pkts)); /* for secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 5a400435dd..44c4d33879 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -1542,7 +1542,7 @@ static _RTE_ETH_TX_DEF(i40e_xmit_pkts_vec) * TX simple prep functions * **********************************************************************/ -uint16_t +static uint16_t i40e_simple_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1574,12 +1574,14 @@ i40e_simple_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } +_RTE_ETH_TX_PREP_DEF(i40e_simple_prep_pkts) + /********************************************************************* * * TX prep functions * **********************************************************************/ -uint16_t +static uint16_t i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1636,6 +1638,8 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } +_RTE_ETH_TX_PREP_DEF(i40e_prep_pkts) + /* * Find the VSI the queue belongs to. 'queue_idx' is the queue index * application used, which assume having sequential ones. 
But from driver's @@ -3594,12 +3598,14 @@ i40e_set_tx_function(struct rte_eth_dev *dev) rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts_simple)); } - dev->tx_pkt_prepare = i40e_simple_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, + _RTE_ETH_FUNC(i40e_simple_prep_pkts)); } else { PMD_INIT_LOG(DEBUG, "Xmit tx finally be used."); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts)); - dev->tx_pkt_prepare = i40e_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, + _RTE_ETH_FUNC(i40e_prep_pkts)); } } diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index c51d5db2f7..85bc29b23a 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -204,10 +204,9 @@ _RTE_ETH_RX_PROTO(i40e_recv_pkts); _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts); _RTE_ETH_TX_PROTO(i40e_xmit_pkts); -uint16_t i40e_simple_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(i40e_simple_prep_pkts); +_RTE_ETH_TX_PROTO(i40e_prep_pkts); + int i40e_tx_queue_init(struct i40e_tx_queue *txq); int i40e_rx_queue_init(struct i40e_rx_queue *rxq); void i40e_free_tx_resources(struct i40e_tx_queue *txq); diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 9558455f7f..42b6f5928d 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1998,7 +1998,7 @@ ice_dev_init(struct rte_eth_dev *dev) dev->tx_descriptor_status = ice_tx_descriptor_status; rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts)); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts)); - dev->tx_pkt_prepare = ice_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(ice_prep_pkts)); /* for secondary processes, we don't initialise any further as primary * has already done this work. 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index e97564fdd6..2ddcbbb721 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -3339,7 +3339,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq) #define ICE_MIN_TSO_MSS 64 #define ICE_MAX_TSO_MSS 9728 #define ICE_MAX_TSO_FRAME_SIZE 262144 -uint16_t +static uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -3378,6 +3378,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } +_RTE_ETH_TX_PREP_DEF(ice_prep_pkts) + void __rte_cold ice_set_tx_function(struct rte_eth_dev *dev) { @@ -3430,7 +3432,7 @@ ice_set_tx_function(struct rte_eth_dev *dev) } if (ad->tx_vec_allowed) { - dev->tx_pkt_prepare = NULL; + rte_eth_set_tx_prep(dev->data->port_id, NULL); if (ad->tx_use_avx512) { #ifdef CC_AVX512_SUPPORT if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { @@ -3439,7 +3441,8 @@ ice_set_tx_function(struct rte_eth_dev *dev) dev->data->port_id); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload)); - dev->tx_pkt_prepare = ice_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, + _RTE_ETH_FUNC(ice_prep_pkts)); } else { PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", @@ -3455,7 +3458,8 @@ ice_set_tx_function(struct rte_eth_dev *dev) dev->data->port_id); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2_offload)); - dev->tx_pkt_prepare = ice_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, + _RTE_ETH_FUNC(ice_prep_pkts)); } else { PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", ad->tx_use_avx2 ? "avx2 " : "", @@ -3475,12 +3479,13 @@ ice_set_tx_function(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts_simple)); - dev->tx_pkt_prepare = NULL; + rte_eth_set_tx_prep(dev->data->port_id, NULL); } else { PMD_INIT_LOG(DEBUG, "Normal tx finally be used."); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts)); - dev->tx_pkt_prepare = ice_prep_pkts; + rte_eth_set_tx_prep(dev->data->port_id, + _RTE_ETH_FUNC(ice_prep_pkts)); } } diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 3c06406204..53f6080cc9 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -215,8 +215,7 @@ int ice_fdir_setup_rx_resources(struct ice_pf *pf); _RTE_ETH_RX_PROTO(ice_recv_pkts); _RTE_ETH_TX_PROTO(ice_xmit_pkts); void ice_set_rx_function(struct rte_eth_dev *dev); -uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); +_RTE_ETH_TX_PROTO(ice_prep_pkts); void ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq); void ice_set_tx_function(struct rte_eth_dev *dev); diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index 45d1160465..fe1b4fc349 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -1727,6 +1727,68 @@ rte_eth_tx_burst_t rte_eth_get_tx_burst(uint16_t port_id); __rte_experimental int rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf); +/** + * @internal + * Helper routine for eth driver tx_prepare API. + * Should be called as first thing on entrance to the PMD's rte_eth_tx_prepare + * implementation. + * Does necessary checks and returns pointer to TX queue data structure. + * + * @param port_id + * The port identifier of the Ethernet device. 
+ * @param queue_id + * The index of the transmit queue. + * + * @return + * Pointer to device TX queue structure on success or NULL otherwise. + */ +__rte_internal +static inline void * +_rte_eth_tx_prep_prolog(uint16_t port_id, uint16_t queue_id) +{ + struct rte_eth_dev *dev; + +#ifdef RTE_ETHDEV_DEBUG_TX + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id); + rte_errno = ENODEV; + return NULL; + } +#endif + + dev = &rte_eth_devices[port_id]; + +#ifdef RTE_ETHDEV_DEBUG_TX + if (queue_id >= dev->data->nb_tx_queues) { + RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); + rte_errno = EINVAL; + return NULL; + } +#endif + + return dev->data->tx_queues[queue_id]; +} + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD tx_prepare + * functions. + */ +#define _RTE_ETH_TX_PREP_DEF(fn) \ +_RTE_ETH_TX_PROTO(fn) \ +{ \ + void *txq = _rte_eth_tx_prep_prolog(port_id, queue_id); \ + if (txq == NULL) \ + return 0; \ + return fn(txq, tx_pkts, nb_pkts); \ +} + +__rte_experimental +rte_eth_tx_prep_t rte_eth_get_tx_prep(uint16_t port_id); + +__rte_experimental +int rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t txf); + #ifdef __cplusplus } #endif diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1165e0bb32..6b1d9c5f83 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) eth_dev->device = NULL; eth_dev->process_private = NULL; eth_dev->intr_handle = NULL; - eth_dev->tx_pkt_prepare = NULL; eth_dev->rx_queue_count = NULL; eth_dev->rx_descriptor_done = NULL; eth_dev->rx_descriptor_status = NULL; @@ -6379,3 +6378,25 @@ rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf) rte_eth_burst_api[port_id].tx_pkt_burst = txf; return 0; } + +__rte_experimental +rte_eth_tx_prep_t +rte_eth_get_tx_prep(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].tx_pkt_prepare; +} + +__rte_experimental +int +rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t tpf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].tx_pkt_prepare = tpf; + return 0; +} diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index 3eac61a289..01fd1c99c3 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -5293,30 +5293,13 @@ static inline uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct rte_eth_dev *dev; - -#ifdef RTE_ETHDEV_DEBUG_TX - if (!rte_eth_dev_is_valid_port(port_id)) { - RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id); - rte_errno = ENODEV; - return 0; - } -#endif - - dev = &rte_eth_devices[port_id]; - -#ifdef RTE_ETHDEV_DEBUG_TX - if (queue_id >= dev->data->nb_tx_queues) { - RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); - rte_errno = EINVAL; + if (port_id >= RTE_MAX_ETHPORTS) return 0; - } -#endif - if (!dev->tx_pkt_prepare) + if (rte_eth_burst_api[port_id].tx_pkt_prepare == NULL) return nb_pkts; - return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id], + return rte_eth_burst_api[port_id].tx_pkt_prepare(port_id, queue_id, tx_pkts, nb_pkts); } diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index ace77db1b6..2d4600af4d 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -115,8 +115,6 @@ struct rte_eth_rxtx_callback { * 
process, while the actual configuration data for the device is shared. */ struct rte_eth_dev { - eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */ - eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */ eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */ diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index 8f8a6b4a5a..b26fd478aa 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -254,8 +254,10 @@ EXPERIMENTAL { rte_eth_burst_api; rte_eth_get_rx_burst; rte_eth_get_tx_burst; + rte_eth_get_tx_prep; rte_eth_set_rx_burst; rte_eth_set_tx_burst; + rte_eth_set_tx_prep; }; INTERNAL { From patchwork Fri Aug 20 16:28:32 2021 X-Patchwork-Submitter: "Ananyev, Konstantin" X-Patchwork-Id: 97171 From: Konstantin Ananyev To: dev@dpdk.org Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com, techboard@dpdk.org, Konstantin Ananyev Date: Fri, 20 Aug 2021 17:28:32 +0100 Message-Id: <20210820162834.12544-6-konstantin.ananyev@intel.com> In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com> References: <20210820162834.12544-1-konstantin.ananyev@intel.com> Subject: [dpdk-dev] [RFC 5/7] eth: make drivers use new API to obtain descriptor status ethdev: make changes so drivers can start using the new API for rx_descriptor_status() and tx_descriptor_status(); remove the related function pointers from 'struct rte_eth_dev'. drivers/net: adjust to the new API.
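As before, a minimal sketch of the conversion this patch asks of a PMD, using the _RTE_ETH_RX_DESC_DEF helper added below (the mydrv_* names are hypothetical, and a PMD source file that already includes ethdev_driver.h is assumed):

static int
mydrv_rx_descriptor_status(__rte_unused void *rx_queue, uint16_t offset)
{
	/* a real PMD would check the DD bits of the descriptor at 'offset' */
	return offset == 0 ? RTE_ETH_RX_DESC_DONE : RTE_ETH_RX_DESC_AVAIL;
}

/* Generates int _rte_eth_mydrv_rx_descriptor_status(port_id, queue_id,
 * offset), built on top of _rte_eth_rx_desc_prolog().
 */
static _RTE_ETH_RX_DESC_DEF(mydrv_rx_descriptor_status)

static int
mydrv_dev_init(struct rte_eth_dev *dev)
{
	/* register in rte_eth_burst_api[] instead of filling
	 * dev->rx_descriptor_status
	 */
	return rte_eth_set_rx_desc_st(dev->data->port_id,
			_RTE_ETH_FUNC(mydrv_rx_descriptor_status));
}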
Signed-off-by: Konstantin Ananyev --- drivers/net/i40e/i40e_ethdev.c | 6 +- drivers/net/i40e/i40e_ethdev_vf.c | 6 +- drivers/net/i40e/i40e_rxtx.c | 8 +- drivers/net/i40e/i40e_rxtx.h | 4 +- drivers/net/ice/ice_ethdev.c | 6 +- drivers/net/ice/ice_rxtx.c | 8 +- drivers/net/ice/ice_rxtx.h | 4 +- lib/ethdev/ethdev_driver.h | 132 ++++++++++++++++++++++++++++++ lib/ethdev/rte_ethdev.c | 45 +++++++++- lib/ethdev/rte_ethdev.h | 40 ++++----- lib/ethdev/rte_ethdev_core.h | 3 - lib/ethdev/version.map | 4 + 12 files changed, 221 insertions(+), 45 deletions(-) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index bd6408da90..da5a7ec168 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1435,8 +1435,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) dev->dev_ops = &i40e_eth_dev_ops; dev->rx_queue_count = i40e_dev_rx_queue_count; dev->rx_descriptor_done = i40e_dev_rx_descriptor_done; - dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; - dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; + rte_eth_set_rx_desc_st(dev->data->port_id, + _RTE_ETH_FUNC(i40e_dev_rx_descriptor_status)); + rte_eth_set_tx_desc_st(dev->data->port_id, + _RTE_ETH_FUNC(i40e_dev_tx_descriptor_status)); rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts)); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts)); rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(i40e_prep_pkts)); diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 3755bdb66a..f1bd6d4e1b 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1574,8 +1574,10 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &i40evf_eth_dev_ops; eth_dev->rx_queue_count = i40e_dev_rx_queue_count; eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done; - eth_dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; - eth_dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; + rte_eth_set_rx_desc_st(eth_dev->data->port_id, + _RTE_ETH_FUNC(i40e_dev_rx_descriptor_status)); + rte_eth_set_tx_desc_st(eth_dev->data->port_id, + _RTE_ETH_FUNC(i40e_dev_tx_descriptor_status)); rte_eth_set_rx_burst(eth_dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts)); rte_eth_set_tx_burst(eth_dev->data->port_id, diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 44c4d33879..310bb3f496 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -2189,7 +2189,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset) return ret; } -int +static int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) { struct i40e_rx_queue *rxq = rx_queue; @@ -2216,7 +2216,9 @@ i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) return RTE_ETH_RX_DESC_AVAIL; } -int +_RTE_ETH_RX_DESC_DEF(i40e_dev_rx_descriptor_status) + +static int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) { struct i40e_tx_queue *txq = tx_queue; @@ -2247,6 +2249,8 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } +_RTE_ETH_TX_DESC_DEF(i40e_dev_tx_descriptor_status) + static int i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 85bc29b23a..42b3407fe2 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -223,8 +223,8 @@ void 
i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq); uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); -int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); -int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); +_RTE_ETH_RX_DESC_PROTO(i40e_dev_rx_descriptor_status); +_RTE_ETH_TX_DESC_PROTO(i40e_dev_tx_descriptor_status); _RTE_ETH_RX_PROTO(i40e_recv_pkts_vec); _RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts_vec); diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 42b6f5928d..8907737ba3 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1994,8 +1994,10 @@ ice_dev_init(struct rte_eth_dev *dev) dev->dev_ops = &ice_eth_dev_ops; dev->rx_queue_count = ice_rx_queue_count; - dev->rx_descriptor_status = ice_rx_descriptor_status; - dev->tx_descriptor_status = ice_tx_descriptor_status; + rte_eth_set_rx_desc_st(dev->data->port_id, + _RTE_ETH_FUNC(ice_rx_descriptor_status)); + rte_eth_set_tx_desc_st(dev->data->port_id, + _RTE_ETH_FUNC(ice_tx_descriptor_status)); rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts)); rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts)); rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(ice_prep_pkts)); diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 2ddcbbb721..461135b4b4 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -2021,7 +2021,7 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } -int +static int ice_rx_descriptor_status(void *rx_queue, uint16_t offset) { volatile union ice_rx_flex_desc *rxdp; @@ -2046,7 +2046,9 @@ ice_rx_descriptor_status(void *rx_queue, uint16_t offset) return RTE_ETH_RX_DESC_AVAIL; } -int +_RTE_ETH_RX_DESC_DEF(ice_rx_descriptor_status) + +static int ice_tx_descriptor_status(void *tx_queue, uint16_t offset) { struct ice_tx_queue *txq = tx_queue; @@ -2077,6 +2079,8 @@ ice_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } +_RTE_ETH_TX_DESC_DEF(ice_tx_descriptor_status) + void ice_free_queues(struct rte_eth_dev *dev) { diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 53f6080cc9..49418442eb 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -228,8 +228,8 @@ int ice_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_burst_mode *mode); int ice_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_burst_mode *mode); -int ice_rx_descriptor_status(void *rx_queue, uint16_t offset); -int ice_tx_descriptor_status(void *tx_queue, uint16_t offset); +_RTE_ETH_RX_DESC_PROTO(ice_rx_descriptor_status); +_RTE_ETH_TX_DESC_PROTO(ice_tx_descriptor_status); void ice_set_default_ptype_table(struct rte_eth_dev *dev); const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev); void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index fe1b4fc349..eec56189a0 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -1789,6 +1789,138 @@ rte_eth_tx_prep_t rte_eth_get_tx_prep(uint16_t port_id); __rte_experimental int rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t txf); +/** + * @internal + * Helper routine for eth driver rx_descriptor_status API. 
+ * Should be called as first thing on entrance to the PMD's + * rx_descriptor_status implementation. + * Does necessary checks and retrieves pointer to device RX queue. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the receive queue. + * @param rxq + * The output pointer to the RX queue structure. + * + * @return + * Zero on success or negative error code otherwise. + */ +__rte_internal +static inline int +_rte_eth_rx_desc_prolog(uint16_t port_id, uint16_t queue_id, void **rxq) +{ + struct rte_eth_dev *dev; + +#ifdef RTE_ETHDEV_DEBUG_RX + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); +#endif + dev = &rte_eth_devices[port_id]; +#ifdef RTE_ETHDEV_DEBUG_RX + if (queue_id >= dev->data->nb_rx_queues) + return -ENODEV; +#endif + *rxq = dev->data->rx_queues[queue_id]; + return 0; +} + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD rx_descriptor_status + * functions. + */ +#define _RTE_ETH_RX_DESC_PROTO(fn) \ + int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \ + uint16_t offset) + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD rx_descriptor_status + * functions. + */ +#define _RTE_ETH_RX_DESC_DEF(fn) \ +_RTE_ETH_RX_DESC_PROTO(fn) \ +{ \ + int rc; \ + void *rxq; \ + rc = _rte_eth_rx_desc_prolog(port_id, queue_id, &rxq); \ + if (rc != 0) \ + return rc; \ + return fn(rxq, offset); \ +} + +__rte_experimental +rte_eth_rx_descriptor_status_t rte_eth_get_rx_desc_st(uint16_t port_id); + +__rte_experimental +int rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf); + +/** + * @internal + * Helper routine for eth driver tx_descriptor_status API. + * Should be called as first thing on entrance to the PMD's + * tx_descriptor_status implementation. + * Does necessary checks and retrieves pointer to device TX queue. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the transmit queue. + * @param txq + * The output pointer to the TX queue structure. + * + * @return + * Zero on success or negative error code otherwise. + */ +__rte_internal +static inline int +_rte_eth_tx_desc_prolog(uint16_t port_id, uint16_t queue_id, void **txq) +{ + struct rte_eth_dev *dev; + +#ifdef RTE_ETHDEV_DEBUG_TX + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); +#endif + dev = &rte_eth_devices[port_id]; +#ifdef RTE_ETHDEV_DEBUG_TX + if (queue_id >= dev->data->nb_tx_queues) + return -ENODEV; +#endif + *txq = dev->data->tx_queues[queue_id]; + return 0; +} + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD tx_descriptor_status + * functions. + */ +#define _RTE_ETH_TX_DESC_PROTO(fn) \ + int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id, \ + uint16_t offset) + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD tx_descriptor_status + * functions. 
+ */ +#define _RTE_ETH_TX_DESC_DEF(fn) \ +_RTE_ETH_TX_DESC_PROTO(fn) \ +{ \ + int rc; \ + void *txq; \ + rc = _rte_eth_tx_desc_prolog(port_id, queue_id, &txq); \ + if (rc != 0) \ + return rc; \ + return fn(txq, offset); \ +} + +__rte_experimental +rte_eth_tx_descriptor_status_t rte_eth_get_tx_desc_st(uint16_t port_id); + +__rte_experimental +int rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t rf); + #ifdef __cplusplus } #endif diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 6b1d9c5f83..e48d1ec281 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -590,8 +590,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) eth_dev->intr_handle = NULL; eth_dev->rx_queue_count = NULL; eth_dev->rx_descriptor_done = NULL; - eth_dev->rx_descriptor_status = NULL; - eth_dev->tx_descriptor_status = NULL; eth_dev->dev_ops = NULL; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -6400,3 +6398,46 @@ rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t tpf) rte_eth_burst_api[port_id].tx_pkt_prepare = tpf; return 0; } + +__rte_experimental +rte_eth_rx_descriptor_status_t +rte_eth_get_rx_desc_st(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].rx_descriptor_status; +} + +__rte_experimental +int +rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].rx_descriptor_status = rf; + return 0; +} + +rte_eth_tx_descriptor_status_t +rte_eth_get_tx_desc_st(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].tx_descriptor_status; +} + +__rte_experimental +int +rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t tf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].tx_descriptor_status = tf; + return 0; +} diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index 01fd1c99c3..073b532b7b 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -5082,21 +5082,15 @@ static inline int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset) { - struct rte_eth_dev *dev; - void *rxq; + rte_eth_rx_descriptor_status_t rds; -#ifdef RTE_ETHDEV_DEBUG_RX - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); -#endif - dev = &rte_eth_devices[port_id]; -#ifdef RTE_ETHDEV_DEBUG_RX - if (queue_id >= dev->data->nb_rx_queues) - return -ENODEV; -#endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP); - rxq = dev->data->rx_queues[queue_id]; + if (port_id >= RTE_MAX_ETHPORTS) + return -EINVAL; + + rds = rte_eth_burst_api[port_id].rx_descriptor_status; + RTE_FUNC_PTR_OR_ERR_RET(rds, -ENOTSUP); - return (*dev->rx_descriptor_status)(rxq, offset); + return (rds)(port_id, queue_id, offset); } #define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. 
*/ @@ -5139,21 +5133,15 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, static inline int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset) { - struct rte_eth_dev *dev; - void *txq; + rte_eth_tx_descriptor_status_t tds; -#ifdef RTE_ETHDEV_DEBUG_TX - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); -#endif - dev = &rte_eth_devices[port_id]; -#ifdef RTE_ETHDEV_DEBUG_TX - if (queue_id >= dev->data->nb_tx_queues) - return -ENODEV; -#endif - RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_descriptor_status, -ENOTSUP); - txq = dev->data->tx_queues[queue_id]; + if (port_id >= RTE_MAX_ETHPORTS) + return -EINVAL; + + tds = rte_eth_burst_api[port_id].tx_descriptor_status; + RTE_FUNC_PTR_OR_ERR_RET(tds, -ENOTSUP); - return (*dev->tx_descriptor_status)(txq, offset); + return (tds)(port_id, queue_id, offset); } /** diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 2d4600af4d..1e42bacfce 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -117,9 +117,6 @@ struct rte_eth_rxtx_callback { struct rte_eth_dev { eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */ - eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */ - eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */ - /** * Next two fields are per-device data but *data is shared between * primary and secondary processes and *process_private is per-process diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index b26fd478aa..802d9c3c11 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -253,9 +253,13 @@ EXPERIMENTAL { # added in 21.11 rte_eth_burst_api; rte_eth_get_rx_burst; + rte_eth_get_rx_desc_st; + rte_eth_get_tx_desc_st; rte_eth_get_tx_burst; rte_eth_get_tx_prep; rte_eth_set_rx_burst; + rte_eth_set_rx_desc_st; + rte_eth_set_tx_desc_st; rte_eth_set_tx_burst; rte_eth_set_tx_prep; }; From patchwork Fri Aug 20 16:28:33 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Ananyev, Konstantin" X-Patchwork-Id: 97172 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id B5915A0C4D; Fri, 20 Aug 2021 18:29:58 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9738241250; Fri, 20 Aug 2021 18:29:44 +0200 (CEST) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id 7820E4121F; Fri, 20 Aug 2021 18:29:42 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10082"; a="203946828" X-IronPort-AV: E=Sophos;i="5.84,338,1620716400"; d="scan'208";a="203946828" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 20 Aug 2021 09:29:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.84,338,1620716400"; d="scan'208";a="490551977" Received: from sivswdev08.ir.intel.com ([10.237.217.47]) by fmsmga008.fm.intel.com with ESMTP; 20 Aug 2021 09:29:39 -0700 From: Konstantin Ananyev To: dev@dpdk.org Cc: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com, 
 techboard@dpdk.org, Konstantin Ananyev
Date: Fri, 20 Aug 2021 17:28:33 +0100
Message-Id: <20210820162834.12544-7-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com>
References: <20210820162834.12544-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC 6/7] eth: make drivers use new API for Rx queue count
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

ethdev:
- make changes so drivers can start using the new API for rx_queue_count().
- provide helper functions/macros.
- remove rx_queue_count() from 'struct rte_eth_dev'.

drivers/net:
- adjust to the new rx_queue_count() API.

Signed-off-by: Konstantin Ananyev
---
 drivers/net/i40e/i40e_ethdev.c    |  3 +-
 drivers/net/i40e/i40e_ethdev_vf.c |  3 +-
 drivers/net/i40e/i40e_rxtx.c      |  4 ++-
 drivers/net/i40e/i40e_rxtx.h      |  3 +-
 drivers/net/ice/ice_ethdev.c      |  3 +-
 drivers/net/ice/ice_rxtx.c        |  4 ++-
 drivers/net/ice/ice_rxtx.h        |  2 +-
 lib/ethdev/ethdev_driver.h        | 58 +++++++++++++++++++++++++++++++
 lib/ethdev/rte_ethdev.c           | 24 ++++++++++++-
 lib/ethdev/rte_ethdev.h           | 13 ++++---
 lib/ethdev/rte_ethdev_core.h      |  1 -
 lib/ethdev/version.map            |  6 ++--
 12 files changed, 105 insertions(+), 19 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index da5a7ec168..a99363659a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1433,7 +1433,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	PMD_INIT_FUNC_TRACE();
 
 	dev->dev_ops = &i40e_eth_dev_ops;
-	dev->rx_queue_count = i40e_dev_rx_queue_count;
+	rte_eth_set_rx_qcnt(dev->data->port_id,
+		_RTE_ETH_FUNC(i40e_dev_rx_queue_count));
 	dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
 	rte_eth_set_rx_desc_st(dev->data->port_id,
 		_RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index f1bd6d4e1b..0da30f6784 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1572,7 +1572,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	/* assign ops func pointer */
 	eth_dev->dev_ops = &i40evf_eth_dev_ops;
-	eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+	rte_eth_set_rx_qcnt(eth_dev->data->port_id,
+		_RTE_ETH_FUNC(i40e_dev_rx_queue_count));
 	eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
 	rte_eth_set_rx_desc_st(eth_dev->data->port_id,
 		_RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 310bb3f496..f0f42c41b2 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2134,7 +2134,7 @@ i40e_dev_rx_queue_release(void *rxq)
 	rte_free(q);
 }
 
-uint32_t
+static uint32_t
 i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define I40E_RXQ_SCAN_INTERVAL 4
@@ -2163,6 +2163,8 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }
 
+_RTE_ETH_RX_QCNT_DEF(i40e_dev_rx_queue_count)
+
 int
 i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 42b3407fe2..3d98b1f9fb 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -220,8 +220,7 @@ int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct
i40e_rx_queue *rxq); void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq); -uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev, - uint16_t rx_queue_id); +_RTE_ETH_RX_QCNT_PROTO(i40e_dev_rx_queue_count); int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); _RTE_ETH_RX_DESC_PROTO(i40e_dev_rx_descriptor_status); _RTE_ETH_TX_DESC_PROTO(i40e_dev_tx_descriptor_status); diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 8907737ba3..cb27f2f501 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1993,7 +1993,8 @@ ice_dev_init(struct rte_eth_dev *dev) #endif dev->dev_ops = &ice_eth_dev_ops; - dev->rx_queue_count = ice_rx_queue_count; + rte_eth_set_rx_qcnt(dev->data->port_id, + _RTE_ETH_FUNC(ice_rx_queue_count)); rte_eth_set_rx_desc_st(dev->data->port_id, _RTE_ETH_FUNC(ice_rx_descriptor_status)); rte_eth_set_tx_desc_st(dev->data->port_id, diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 461135b4b4..e7af0a649b 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1426,7 +1426,7 @@ ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } -uint32_t +static uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) { #define ICE_RXQ_SCAN_INTERVAL 4 @@ -1454,6 +1454,8 @@ ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) return desc; } +_RTE_ETH_RX_QCNT_DEF(ice_rx_queue_count) + #define ICE_RX_FLEX_ERR0_BITS \ ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \ (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 49418442eb..d1e0a8b011 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -219,7 +219,7 @@ _RTE_ETH_TX_PROTO(ice_prep_pkts); void ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq); void ice_set_tx_function(struct rte_eth_dev *dev); -uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); +_RTE_ETH_RX_QCNT_PROTO(ice_rx_queue_count); void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_info *qinfo); void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index eec56189a0..accaf1aab2 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -1921,6 +1921,64 @@ rte_eth_tx_descriptor_status_t rte_eth_get_tx_desc_st(uint16_t port_id); __rte_experimental int rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t rf); +/** + * @internal + * Helper routine for eth driver rx_queue_count API. + * Should be called as first thing on entrance to the PMD's + * rx_queue_count implementation. + * Does necessary checks for input parameters. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the receive queue. + * + * @return + * Zero on success or negative error code otherwise. + */ +__rte_internal +static inline int +_rte_eth_rx_qcnt_prolog(uint16_t port_id, uint16_t queue_id) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + if (queue_id >= dev->data->nb_rx_queues || + dev->data->rx_queues[queue_id] == NULL) + return -EINVAL; + return 0; +} + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD rx_queue_count + * functions. 
+ */ +#define _RTE_ETH_RX_QCNT_PROTO(fn) \ + int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id) + +/** + * @internal + * Helper macro to create new API wrappers for existing PMD rx_queue_count + * functions. + */ +#define _RTE_ETH_RX_QCNT_DEF(fn) \ +_RTE_ETH_RX_QCNT_PROTO(fn) \ +{ \ + int rc; \ + rc = _rte_eth_rx_qcnt_prolog(port_id, queue_id); \ + if (rc != 0) \ + return rc; \ + return fn(&rte_eth_devices[port_id], queue_id); \ +} + +__rte_experimental +rte_eth_rx_queue_count_t rte_eth_get_rx_qcnt(uint16_t port_id); + +__rte_experimental +int rte_eth_set_rx_qcnt(uint16_t port_id, rte_eth_rx_queue_count_t rf); + #ifdef __cplusplus } #endif diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index e48d1ec281..0cc9f40e95 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) eth_dev->device = NULL; eth_dev->process_private = NULL; eth_dev->intr_handle = NULL; - eth_dev->rx_queue_count = NULL; eth_dev->rx_descriptor_done = NULL; eth_dev->dev_ops = NULL; @@ -6421,6 +6420,7 @@ rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf) return 0; } +__rte_experimental rte_eth_tx_descriptor_status_t rte_eth_get_tx_desc_st(uint16_t port_id) { @@ -6441,3 +6441,25 @@ rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t tf) rte_eth_burst_api[port_id].tx_descriptor_status = tf; return 0; } + +__rte_experimental +rte_eth_rx_queue_count_t +rte_eth_get_rx_qcnt(uint16_t port_id) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) { + rte_errno = EINVAL; + return NULL; + } + return rte_eth_burst_api[port_id].rx_queue_count; +} + +__rte_experimental +int +rte_eth_set_rx_qcnt(uint16_t port_id, rte_eth_rx_queue_count_t rf) +{ + if (port_id >= RTE_DIM(rte_eth_burst_api)) + return -EINVAL; + + rte_eth_burst_api[port_id].rx_queue_count = rf; + return 0; +} diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index 073b532b7b..73aeef8c36 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -5004,16 +5004,15 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, static inline int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id) { - struct rte_eth_dev *dev; + rte_eth_rx_queue_count_t rqc; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP); - if (queue_id >= dev->data->nb_rx_queues || - dev->data->rx_queues[queue_id] == NULL) + if (port_id >= RTE_MAX_ETHPORTS) return -EINVAL; - return (int)(*dev->rx_queue_count)(dev, queue_id); + rqc = rte_eth_burst_api[port_id].rx_queue_count; + RTE_FUNC_PTR_OR_ERR_RET(rqc, -ENOTSUP); + + return (rqc)(port_id, queue_id); } /** diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 1e42bacfce..53dd5c2114 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -115,7 +115,6 @@ struct rte_eth_rxtx_callback { * process, while the actual configuration data for the device is shared. */ struct rte_eth_dev { - eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. 
 */
 	/**
 	 * Next two fields are per-device data but *data is shared between
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 802d9c3c11..ff838fef53 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -254,13 +254,15 @@ EXPERIMENTAL {
 	rte_eth_burst_api;
 	rte_eth_get_rx_burst;
 	rte_eth_get_rx_desc_st;
-	rte_eth_get_tx_desc_st;
+	rte_eth_get_rx_qcnt;
 	rte_eth_get_tx_burst;
+	rte_eth_get_tx_desc_st;
 	rte_eth_get_tx_prep;
 	rte_eth_set_rx_burst;
 	rte_eth_set_rx_desc_st;
-	rte_eth_set_tx_desc_st;
+	rte_eth_set_rx_qcnt;
 	rte_eth_set_tx_burst;
+	rte_eth_set_tx_desc_st;
 	rte_eth_set_tx_prep;
};

From patchwork Fri Aug 20 16:28:34 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Ananyev, Konstantin"
X-Patchwork-Id: 97173
X-Patchwork-Delegate: ferruh.yigit@amd.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
 by inbox.dpdk.org (Postfix) with ESMTP id EFEC2A0C4D;
 Fri, 20 Aug 2021 18:30:06 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
 by mails.dpdk.org (Postfix) with ESMTP id D5E794125F;
 Fri, 20 Aug 2021 18:29:49 +0200 (CEST)
Received: from mga18.intel.com (mga18.intel.com [134.134.136.126])
 by mails.dpdk.org (Postfix) with ESMTP id 2FA5641208;
 Fri, 20 Aug 2021 18:29:47 +0200 (CEST)
X-IronPort-AV: E=McAfee;i="6200,9189,10082"; a="203946836"
X-IronPort-AV: E=Sophos;i="5.84,338,1620716400"; d="scan'208";a="203946836"
Received: from fmsmga008.fm.intel.com ([10.253.24.58])
 by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 20 Aug 2021 09:29:46 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.84,338,1620716400"; d="scan'208";a="490551988"
Received: from sivswdev08.ir.intel.com ([10.237.217.47])
 by fmsmga008.fm.intel.com with ESMTP; 20 Aug 2021 09:29:44 -0700
From: Konstantin Ananyev
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@intel.com,
 andrew.rybchenko@oktetlabs.ru, qiming.yang@intel.com, qi.z.zhang@intel.com,
 beilei.xing@intel.com, techboard@dpdk.org, Konstantin Ananyev
Date: Fri, 20 Aug 2021 17:28:34 +0100
Message-Id: <20210820162834.12544-8-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20210820162834.12544-1-konstantin.ananyev@intel.com>
References: <20210820162834.12544-1-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC 7/7] eth: hide eth dev related structures
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Move rte_eth_dev, rte_eth_dev_data, rte_eth_rxtx_callback and related
data into ethdev_driver.h.
Make changes to keep DPDK building after that.
Remove references to 'rte_eth_devices[]' from test-pmd.
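A minimal sketch of the application-side conversion, assuming typical use
of the new rte_eth_macaddrs_get() (print_port_macs() and its error
handling are invented for illustration; the helper mirrors what the
test-pmd show_macs() change below does, and since the symbol is still
experimental, callers must build with ALLOW_EXPERIMENTAL_API):

#include <stdio.h>
#include <rte_ethdev.h>

/* Print every non-zero MAC address of a port without touching the
 * now-internal rte_eth_devices[] array. */
static void
print_port_macs(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int i, rc;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	struct rte_ether_addr addr[dev_info.max_mac_addrs];

	/* Returns the number of addresses copied, or < 0 on error. */
	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
	if (rc < 0)
		return;

	for (i = 0; i < rc; i++) {
		if (rte_is_zero_ether_addr(&addr[i]))
			continue;
		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
		printf("%s\n", buf);
	}
}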
Signed-off-by: Konstantin Ananyev --- app/test-pmd/config.c | 23 ++- drivers/common/octeontx2/otx2_sec_idev.c | 2 +- drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 2 +- lib/ethdev/ethdev_driver.h | 135 ++++++++++++++++++ lib/ethdev/rte_ethdev.c | 26 ++++ lib/ethdev/rte_ethdev.h | 44 +++--- lib/ethdev/rte_ethdev_core.h | 135 ------------------ lib/ethdev/version.map | 1 + lib/eventdev/rte_event_eth_rx_adapter.c | 2 +- lib/eventdev/rte_event_eth_tx_adapter.c | 2 +- lib/eventdev/rte_eventdev.c | 2 +- 11 files changed, 197 insertions(+), 177 deletions(-) diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 31d8ba1b91..5b6a8a1680 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -5197,20 +5197,20 @@ show_macs(portid_t port_id) { char buf[RTE_ETHER_ADDR_FMT_SIZE]; struct rte_eth_dev_info dev_info; - struct rte_ether_addr *addr; - uint32_t i, num_macs = 0; - struct rte_eth_dev *dev; - - dev = &rte_eth_devices[port_id]; + int32_t i, rc, num_macs = 0; if (eth_dev_info_get_print_err(port_id, &dev_info)) return; - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = &dev->data->mac_addrs[i]; + struct rte_ether_addr addr[dev_info.max_mac_addrs]; + rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs); + if (rc < 0) + return; + + for (i = 0; i < rc; i++) { /* skip zero address */ - if (rte_is_zero_ether_addr(addr)) + if (rte_is_zero_ether_addr(&addr[i])) continue; num_macs++; @@ -5218,14 +5218,13 @@ show_macs(portid_t port_id) printf("Number of MAC address added: %d\n", num_macs); - for (i = 0; i < dev_info.max_mac_addrs; i++) { - addr = &dev->data->mac_addrs[i]; + for (i = 0; i < rc; i++) { /* skip zero address */ - if (rte_is_zero_ether_addr(addr)) + if (rte_is_zero_ether_addr(&addr[i])) continue; - rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]); printf(" %s\n", buf); } } diff --git a/drivers/common/octeontx2/otx2_sec_idev.c b/drivers/common/octeontx2/otx2_sec_idev.c index 6e9643c383..b561b67174 100644 --- a/drivers/common/octeontx2/otx2_sec_idev.c +++ b/drivers/common/octeontx2/otx2_sec_idev.c @@ -4,7 +4,7 @@ #include #include -#include +#include #include #include "otx2_common.h" diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c index 42100154cd..c71be61158 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include "otx2_cryptodev.h" diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h index accaf1aab2..f931fd10e1 100644 --- a/lib/ethdev/ethdev_driver.h +++ b/lib/ethdev/ethdev_driver.h @@ -21,6 +21,141 @@ extern "C" { #endif +/** + * @internal + * Structure used to hold information about the callbacks to be called for a + * queue on RX and TX. + */ +struct rte_eth_rxtx_callback { + struct rte_eth_rxtx_callback *next; + union{ + rte_rx_callback_fn rx; + rte_tx_callback_fn tx; + } fn; + void *param; +}; + +/** + * @internal + * The generic data structure associated with each ethernet device. + * + * Pointers to burst-oriented packet receive and transmit functions are + * located at the beginning of the structure, along with the pointer to + * where all the data elements for the particular device are stored in shared + * memory. This split allows the function pointer and driver data to be per- + * process, while the actual configuration data for the device is shared. 
+ */ +struct rte_eth_dev { + eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */ + /** + * Next two fields are per-device data but *data is shared between + * primary and secondary processes and *process_private is per-process + * private. The second one is managed by PMDs if necessary. + */ + struct rte_eth_dev_data *data; /**< Pointer to device data. */ + void *process_private; /**< Pointer to per-process device data. */ + const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */ + struct rte_device *device; /**< Backing device */ + struct rte_intr_handle *intr_handle; /**< Device interrupt handle */ + /** User application callbacks for NIC interrupts */ + struct rte_eth_dev_cb_list link_intr_cbs; + /** + * User-supplied functions called from rx_burst to post-process + * received packets before passing them to the user + */ + struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; + /** + * User-supplied functions called from tx_burst to pre-process + * received packets before passing them to the driver for transmission. + */ + struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; + enum rte_eth_dev_state state; /**< Flag indicating the port state */ + void *security_ctx; /**< Context for security ops */ + + uint64_t reserved_64s[4]; /**< Reserved for future fields */ + void *reserved_ptrs[4]; /**< Reserved for future fields */ +} __rte_cache_aligned; + +struct rte_eth_dev_sriov; +struct rte_eth_dev_owner; + +/** + * @internal + * The data part, with no function pointers, associated with each ethernet device. + * + * This structure is safe to place in shared memory to be common among different + * processes in a multi-process configuration. + */ +struct rte_eth_dev_data { + char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */ + + void **rx_queues; /**< Array of pointers to RX queues. */ + void **tx_queues; /**< Array of pointers to TX queues. */ + uint16_t nb_rx_queues; /**< Number of RX queues. */ + uint16_t nb_tx_queues; /**< Number of TX queues. */ + + struct rte_eth_dev_sriov sriov; /**< SRIOV data */ + + void *dev_private; + /**< PMD-specific private data. + * @see rte_eth_dev_release_port() + */ + + struct rte_eth_link dev_link; /**< Link-level information & status. */ + struct rte_eth_conf dev_conf; /**< Configuration applied to device. */ + uint16_t mtu; /**< Maximum Transmission Unit. */ + uint32_t min_rx_buf_size; + /**< Common RX buffer size handled by all queues. */ + + uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */ + struct rte_ether_addr *mac_addrs; + /**< Device Ethernet link address. + * @see rte_eth_dev_release_port() + */ + uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR]; + /**< Bitmap associating MAC addresses to pools. */ + struct rte_ether_addr *hash_mac_addrs; + /**< Device Ethernet MAC addresses of hash filtering. + * @see rte_eth_dev_release_port() + */ + uint16_t port_id; /**< Device [external] port identifier. */ + + __extension__ + uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */ + scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */ + all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */ + dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */ + lro : 1, /**< RX LRO is ON(1) / OFF(0) */ + dev_configured : 1; + /**< Indicates whether the device is configured. + * CONFIGURED(1) / NOT CONFIGURED(0). 
+	 */
+	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
+	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
+	uint32_t dev_flags; /**< Capabilities. */
+	int numa_node; /**< NUMA node connection. */
+	struct rte_vlan_filter_conf vlan_filter_conf;
+	/**< VLAN filter configuration. */
+	struct rte_eth_dev_owner owner; /**< The port owner. */
+	uint16_t representor_id;
+	/**< Switch-specific identifier.
+	 *   Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags.
+	 */
+
+	pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
+	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	void *reserved_ptrs[4];   /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The pool of *rte_eth_dev* structures. The size of the pool
+ * is configured at compile-time in the file.
+ */
+extern struct rte_eth_dev rte_eth_devices[];
+
 /**< @internal Declaration of the hairpin peer queue information structure. */
 struct rte_hairpin_peer_info;
 
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 0cc9f40e95..a41f3b2d57 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -3588,6 +3588,32 @@ rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
 	return ret;
 }
 
+__rte_experimental
+int
+rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr ma[], uint32_t num)
+{
+	int32_t ret;
+	struct rte_eth_dev *dev;
+	struct rte_eth_dev_info dev_info;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+
+	if (ma == NULL) {
+		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	num = RTE_MIN(dev_info.max_mac_addrs, num);
+	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
+
+	return num;
+}
+
 int
 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
 {
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 73aeef8c36..0f425cf042 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -3013,6 +3013,25 @@ int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
  */
 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
 
+/**
+ * Retrieve the Ethernet addresses of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param ma
+ *   A pointer to an array of structures of type *ether_addr* to be filled with
+ *   the Ethernet addresses of the Ethernet device.
+ * @param num
+ *   Number of elements in the *ma* array.
+ * @return
+ *   - number of retrieved addresses if successful
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+__rte_experimental
+int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr ma[],
+	uint32_t num);
+
 /**
  * Retrieve the contextual information of an Ethernet device.
  *
@@ -5015,31 +5034,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
 	return (rqc)(port_id, queue_id);
 }
 
-/**
- * Check if the DD bit of the specific RX descriptor in the queue has been set
- *
- * @param port_id
- *   The port identifier of the Ethernet device.
- * @param queue_id
- *   The queue id on the specific port.
- * @param offset
- *   The offset of the descriptor ID from tail.
- * @return
- *   - (1) if the specific DD bit is set.
- *   - (0) if the specific DD bit is not set.
- *   - (-ENODEV) if *port_id* invalid.
- * - (-ENOTSUP) if the device does not support this function - */ -__rte_deprecated -static inline int -rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset) -{ - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP); - return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset); -} - #define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */ #define RTE_ETH_RX_DESC_DONE 1 /**< Desc done, filled by hw. */ #define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */ diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h index 53dd5c2114..06f42ce899 100644 --- a/lib/ethdev/rte_ethdev_core.h +++ b/lib/ethdev/rte_ethdev_core.h @@ -90,139 +90,4 @@ struct rte_eth_burst_api { extern struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS]; -/** - * @internal - * Structure used to hold information about the callbacks to be called for a - * queue on RX and TX. - */ -struct rte_eth_rxtx_callback { - struct rte_eth_rxtx_callback *next; - union{ - rte_rx_callback_fn rx; - rte_tx_callback_fn tx; - } fn; - void *param; -}; - -/** - * @internal - * The generic data structure associated with each ethernet device. - * - * Pointers to burst-oriented packet receive and transmit functions are - * located at the beginning of the structure, along with the pointer to - * where all the data elements for the particular device are stored in shared - * memory. This split allows the function pointer and driver data to be per- - * process, while the actual configuration data for the device is shared. - */ -struct rte_eth_dev { - eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */ - /** - * Next two fields are per-device data but *data is shared between - * primary and secondary processes and *process_private is per-process - * private. The second one is managed by PMDs if necessary. - */ - struct rte_eth_dev_data *data; /**< Pointer to device data. */ - void *process_private; /**< Pointer to per-process device data. */ - const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */ - struct rte_device *device; /**< Backing device */ - struct rte_intr_handle *intr_handle; /**< Device interrupt handle */ - /** User application callbacks for NIC interrupts */ - struct rte_eth_dev_cb_list link_intr_cbs; - /** - * User-supplied functions called from rx_burst to post-process - * received packets before passing them to the user - */ - struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; - /** - * User-supplied functions called from tx_burst to pre-process - * received packets before passing them to the driver for transmission. - */ - struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT]; - enum rte_eth_dev_state state; /**< Flag indicating the port state */ - void *security_ctx; /**< Context for security ops */ - - uint64_t reserved_64s[4]; /**< Reserved for future fields */ - void *reserved_ptrs[4]; /**< Reserved for future fields */ -} __rte_cache_aligned; - -struct rte_eth_dev_sriov; -struct rte_eth_dev_owner; - -/** - * @internal - * The data part, with no function pointers, associated with each ethernet device. - * - * This structure is safe to place in shared memory to be common among different - * processes in a multi-process configuration. 
- */ -struct rte_eth_dev_data { - char name[RTE_ETH_NAME_MAX_LEN]; /**< Unique identifier name */ - - void **rx_queues; /**< Array of pointers to RX queues. */ - void **tx_queues; /**< Array of pointers to TX queues. */ - uint16_t nb_rx_queues; /**< Number of RX queues. */ - uint16_t nb_tx_queues; /**< Number of TX queues. */ - - struct rte_eth_dev_sriov sriov; /**< SRIOV data */ - - void *dev_private; - /**< PMD-specific private data. - * @see rte_eth_dev_release_port() - */ - - struct rte_eth_link dev_link; /**< Link-level information & status. */ - struct rte_eth_conf dev_conf; /**< Configuration applied to device. */ - uint16_t mtu; /**< Maximum Transmission Unit. */ - uint32_t min_rx_buf_size; - /**< Common RX buffer size handled by all queues. */ - - uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */ - struct rte_ether_addr *mac_addrs; - /**< Device Ethernet link address. - * @see rte_eth_dev_release_port() - */ - uint64_t mac_pool_sel[ETH_NUM_RECEIVE_MAC_ADDR]; - /**< Bitmap associating MAC addresses to pools. */ - struct rte_ether_addr *hash_mac_addrs; - /**< Device Ethernet MAC addresses of hash filtering. - * @see rte_eth_dev_release_port() - */ - uint16_t port_id; /**< Device [external] port identifier. */ - - __extension__ - uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */ - scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */ - all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */ - dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */ - lro : 1, /**< RX LRO is ON(1) / OFF(0) */ - dev_configured : 1; - /**< Indicates whether the device is configured. - * CONFIGURED(1) / NOT CONFIGURED(0). - */ - uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT]; - /**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */ - uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT]; - /**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */ - uint32_t dev_flags; /**< Capabilities. */ - int numa_node; /**< NUMA node connection. */ - struct rte_vlan_filter_conf vlan_filter_conf; - /**< VLAN filter configuration. */ - struct rte_eth_dev_owner owner; /**< The port owner. */ - uint16_t representor_id; - /**< Switch-specific identifier. - * Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags. - */ - - pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */ - uint64_t reserved_64s[4]; /**< Reserved for future fields */ - void *reserved_ptrs[4]; /**< Reserved for future fields */ -} __rte_cache_aligned; - -/** - * @internal - * The pool of *rte_eth_dev* structures. The size of the pool - * is configured at compile-time in the file. 
- */ -extern struct rte_eth_dev rte_eth_devices[]; - #endif /* _RTE_ETHDEV_CORE_H_ */ diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index ff838fef53..24d90c05ea 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -258,6 +258,7 @@ EXPERIMENTAL { rte_eth_get_tx_burst; rte_eth_get_tx_desc_st; rte_eth_get_tx_prep; + rte_eth_macaddrs_get; rte_eth_set_rx_burst; rte_eth_set_rx_desc_st; rte_eth_set_rx_qcnt; diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 13dfb28401..89c4ca5d40 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/lib/eventdev/rte_event_eth_tx_adapter.c b/lib/eventdev/rte_event_eth_tx_adapter.c index 18c0359db7..1c06c8707c 100644 --- a/lib/eventdev/rte_event_eth_tx_adapter.c +++ b/lib/eventdev/rte_event_eth_tx_adapter.c @@ -3,7 +3,7 @@ */ #include #include -#include +#include #include "eventdev_pmd.h" #include "rte_eventdev_trace.h" diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c index 594dd5e759..ca1552475e 100644 --- a/lib/eventdev/rte_eventdev.c +++ b/lib/eventdev/rte_eventdev.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include
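Taken together, the series leaves drivers with a single registration
pattern for their fast-path ops. The sketch below is only an
illustration of that pattern for a hypothetical "mypmd" driver: every
mypmd_* symbol is invented, the stub bodies stand in for real
descriptor-ring logic, and it assumes only helpers actually introduced
above (_RTE_ETH_FUNC(), _RTE_ETH_RX_DESC_DEF(), _RTE_ETH_RX_QCNT_DEF()
and the rte_eth_set_*() setters):

#include <ethdev_driver.h>

/* Queue-level callback kept in the PMD's traditional (void *queue) form. */
static int
mypmd_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	RTE_SET_USED(rx_queue);
	RTE_SET_USED(offset);
	/* A real PMD would inspect its RX ring at 'offset' here. */
	return RTE_ETH_RX_DESC_AVAIL;
}
/* Generates the public wrapper (its symbol name comes from
 * _RTE_ETH_FUNC()) that runs _rte_eth_rx_desc_prolog() and then
 * calls the stub above. */
_RTE_ETH_RX_DESC_DEF(mypmd_rx_descriptor_status)

static uint32_t
mypmd_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(rx_queue_id);
	return 0; /* a real PMD would return the used-descriptor count */
}
_RTE_ETH_RX_QCNT_DEF(mypmd_rx_queue_count)

static int
mypmd_dev_init(struct rte_eth_dev *dev)
{
	uint16_t port_id = dev->data->port_id;

	/* Publish the wrappers in the flat rte_eth_burst_api[] array
	 * instead of filling function pointers in struct rte_eth_dev.
	 * Each setter returns 0, or -EINVAL for an out-of-range port. */
	rte_eth_set_rx_desc_st(port_id,
		_RTE_ETH_FUNC(mypmd_rx_descriptor_status));
	rte_eth_set_rx_qcnt(port_id,
		_RTE_ETH_FUNC(mypmd_rx_queue_count));

	return 0;
}

The same pattern extends to the Rx/Tx burst and Tx prepare callbacks via
rte_eth_set_rx_burst(), rte_eth_set_tx_burst() and rte_eth_set_tx_prep(),
as the i40e and ice conversions in the patches above show.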