From patchwork Fri Oct 4 19:54:02 2019
X-Patchwork-Submitter: Ori Kam
X-Patchwork-Id: 60545
From: Ori Kam <orika@mellanox.com>
To: Thomas Monjalon, Ferruh Yigit, Andrew Rybchenko
Cc: dev@dpdk.org, orika@mellanox.com, jingjing.wu@intel.com,
 stephen@networkplumber.org
Date: Fri, 4 Oct 2019 19:54:02 +0000
Message-Id: <1570218855-73478-2-git-send-email-orika@mellanox.com>
In-Reply-To: <1570218855-73478-1-git-send-email-orika@mellanox.com>
References: <1569479349-36962-1-git-send-email-orika@mellanox.com>
 <1570218855-73478-1-git-send-email-orika@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 01/14] ethdev: add support for hairpin queue

This commit introduces the hairpin queue type.

A hairpin queue is built from an Rx queue bound to a Tx queue.
It is used to offload traffic coming from the wire and redirect it
back to the wire.

There are 3 new functions:
- rte_eth_dev_hairpin_capability_get
- rte_eth_rx_hairpin_queue_setup
- rte_eth_tx_hairpin_queue_setup

In order to use the queue, there is a need to create an rte_flow rule
with a queue / RSS action that targets one or more of the Rx queues.

Signed-off-by: Ori Kam <orika@mellanox.com>
---
V2:
 - update according to ML comments.
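As background, a minimal application-side sketch of the new calls (illustrative
only, not part of the patch below): the port id, the use of queue index 1, the
single-peer binding and the descriptor count of 0 (driver default) are all
assumptions; the queues must be covered by rte_eth_dev_configure() and, per the
checks in this patch, be set up before the port is started unless the PMD
advertises runtime queue setup.

#include <rte_ethdev.h>

/* Illustrative only: hairpin Rx queue 1 to Tx queue 1 of the same port. */
static int
setup_hairpin_pair(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	struct rte_eth_hairpin_conf conf = { .peer_n = 1 };
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret; /* e.g. -ENOTSUP: no hairpin support on this port */

	/* The Rx queue's peer is Tx queue 1 of the same port. */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = 1;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, 1,
					     0 /* driver default depth */,
					     &conf);
	if (ret != 0)
		return ret;

	/* The Tx queue's peer points back at the hairpin Rx queue. */
	return rte_eth_tx_hairpin_queue_setup(port_id, 1,
					      0 /* driver default depth */,
					      &conf);
}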
---
 lib/librte_ethdev/rte_ethdev.c           | 214 ++++++++++++++++++++++++++++++-
 lib/librte_ethdev/rte_ethdev.h           | 126 ++++++++++++++++++
 lib/librte_ethdev/rte_ethdev_core.h      |  27 +++-
 lib/librte_ethdev/rte_ethdev_version.map |   5 +
 4 files changed, 368 insertions(+), 4 deletions(-)

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index af82360..ee8af42 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1752,12 +1752,102 @@ struct rte_eth_dev *
 	if (!dev->data->min_rx_buf_size ||
 	    dev->data->min_rx_buf_size > mbp_buf_size)
 		dev->data->min_rx_buf_size = mbp_buf_size;
+	if (dev->data->rx_queue_state[rx_queue_id] ==
+	    RTE_ETH_QUEUE_STATE_HAIRPIN)
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
 	}

 	return eth_err(port_id, ret);
 }

 int
+rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+			       uint16_t nb_rx_desc,
+			       const struct rte_eth_hairpin_conf *conf)
+{
+	int ret;
+	struct rte_eth_dev *dev;
+	struct rte_eth_hairpin_cap cap;
+	struct rte_eth_dev_info dev_info;
+	void **rxq;
+	int i;
+	int count = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+		return -EINVAL;
+	}
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get,
+				-ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
+				-ENOTSUP);
+	rte_eth_dev_hairpin_capability_get(port_id, &cap);
+	/* Use default specified by driver, if nb_rx_desc is zero */
+	if (nb_rx_desc == 0)
+		nb_rx_desc = cap.max_nb_desc;
+	if (nb_rx_desc > cap.max_nb_desc) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for nb_rx_desc(=%hu), should be: "
+			"<= %hu", nb_rx_desc, cap.max_nb_desc);
+		return -EINVAL;
+	}
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+	if (dev->data->dev_started &&
+	    !(dev_info.dev_capa &
+	      RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
+		return -EBUSY;
+	if (dev->data->dev_started &&
+	    (dev->data->rx_queue_state[rx_queue_id] !=
+	     RTE_ETH_QUEUE_STATE_STOPPED))
+		return -EBUSY;
+	if (conf->peer_n > cap.max_rx_2_tx) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for number of peers(=%hu), "
+			"should be: <= %hu", conf->peer_n,
+			cap.max_rx_2_tx);
+		return -EINVAL;
+	}
+	if (conf->peer_n == 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for number of peers(=%hu), "
+			"should be: > 0", conf->peer_n);
+		return -EINVAL;
+	}
+	if (cap.max_n_queues != -1) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			if (dev->data->rx_queue_state[i] ==
+			    RTE_ETH_QUEUE_STATE_HAIRPIN)
+				count++;
+		}
+		if (count > cap.max_n_queues) {
+			RTE_ETHDEV_LOG(ERR,
+				"Too many Rx hairpin queues %d", count);
+			return -EINVAL;
+		}
+	}
+	rxq = dev->data->rx_queues;
+	if (rxq[rx_queue_id]) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
+					-ENOTSUP);
+		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
+		rxq[rx_queue_id] = NULL;
+	}
+	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
+						      nb_rx_desc, conf);
+	if (!ret)
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_HAIRPIN;
+	return eth_err(port_id, ret);
+}
+
+int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
 		       const struct rte_eth_txconf *tx_conf)
@@ -1851,9 +1941,97 @@ struct rte_eth_dev *
 			__func__);
 		return -EINVAL;
 	}
+	ret = (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
+					      socket_id, &local_conf);
+	if (!ret)
+		if (dev->data->tx_queue_state[tx_queue_id] ==
+		    RTE_ETH_QUEUE_STATE_HAIRPIN)
+			dev->data->tx_queue_state[tx_queue_id] =
+				RTE_ETH_QUEUE_STATE_STOPPED;
+	return eth_err(port_id, ret);
+}
+
+int
+rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
+			       uint16_t nb_tx_desc,
+			       const struct rte_eth_hairpin_conf *conf)
+{
+	struct rte_eth_dev *dev;
+	struct rte_eth_hairpin_cap cap;
+	struct rte_eth_dev_info dev_info;
+	void **txq;
+	int i;
+	int count = 0;
+	int ret;

-	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
-		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+	dev = &rte_eth_devices[port_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+		return -EINVAL;
+	}
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get,
+				-ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
+				-ENOTSUP);
+	rte_eth_dev_info_get(port_id, &dev_info);
+	rte_eth_dev_hairpin_capability_get(port_id, &cap);
+	/* Use default specified by driver, if nb_tx_desc is zero */
+	if (nb_tx_desc == 0)
+		nb_tx_desc = cap.max_nb_desc;
+	if (nb_tx_desc > cap.max_nb_desc) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for nb_tx_desc(=%hu), should be: "
+			"<= %hu", nb_tx_desc, cap.max_nb_desc);
+		return -EINVAL;
+	}
+	if (conf->peer_n > cap.max_tx_2_rx) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for number of peers(=%hu), "
+			"should be: <= %hu", conf->peer_n,
+			cap.max_tx_2_rx);
+		return -EINVAL;
+	}
+	if (conf->peer_n == 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"Invalid value for number of peers(=%hu), "
+			"should be: > 0", conf->peer_n);
+		return -EINVAL;
+	}
+	if (cap.max_n_queues != -1) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			if (dev->data->tx_queue_state[i] ==
+			    RTE_ETH_QUEUE_STATE_HAIRPIN)
+				count++;
+		}
+		if (count > cap.max_n_queues) {
+			RTE_ETHDEV_LOG(ERR,
+				"Too many Tx hairpin queues %d", count);
+			return -EINVAL;
+		}
+	}
+	if (dev->data->dev_started &&
+	    !(dev_info.dev_capa &
+	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
+		return -EBUSY;
+	if (dev->data->dev_started &&
+	    (dev->data->tx_queue_state[tx_queue_id] !=
+	     RTE_ETH_QUEUE_STATE_STOPPED))
+		return -EBUSY;
+	txq = dev->data->tx_queues;
+	if (txq[tx_queue_id]) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
+					-ENOTSUP);
+		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
+		txq[tx_queue_id] = NULL;
+	}
+	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
+		(dev, tx_queue_id, nb_tx_desc, conf);
+	if (!ret)
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_HAIRPIN;
+	return eth_err(port_id, ret);
 }

 void
@@ -3981,12 +4159,20 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 	rte_errno = ENOTSUP;
 	return NULL;
 #endif
+	struct rte_eth_dev *dev;
+
 	/* check input parameters */
 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
+	dev = &rte_eth_devices[port_id];
+	if (dev->data->rx_queue_state[queue_id] ==
+	    RTE_ETH_QUEUE_STATE_HAIRPIN) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

 	if (cb == NULL) {
@@ -4058,6 +4244,8 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 	rte_errno = ENOTSUP;
 	return NULL;
 #endif
+	struct rte_eth_dev *dev;
+
 	/* check input parameters */
 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
@@ -4065,6 +4253,13 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 		return NULL;
 	}

+	dev = &rte_eth_devices[port_id];
+	if (dev->data->tx_queue_state[queue_id] ==
+	    RTE_ETH_QUEUE_STATE_HAIRPIN) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

 	if (cb == NULL) {
@@ -4510,6 +4705,21 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 }

 int
+rte_eth_dev_hairpin_capability_get(uint16_t port_id,
+				   struct rte_eth_hairpin_cap *cap)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+	dev = &rte_eth_devices[port_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get,
+				-ENOTSUP);
+	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)
+		       (dev, cap));
+}
+
+int
 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
 {
 	struct rte_eth_dev *dev;
diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index d937fb4..29dcfea 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -804,6 +804,46 @@ struct rte_eth_txconf {
 };

 /**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * A structure used to return the hairpin capabilities that are supported.
+ */
+struct rte_eth_hairpin_cap {
+	int16_t max_n_queues;
+	/**< The max number of hairpin queues; -1 means no limit. */
+	int16_t max_rx_2_tx;
+	/**< Max number of Rx queues to be connected to one Tx queue. */
+	int16_t max_tx_2_rx;
+	/**< Max number of Tx queues to be connected to one Rx queue. */
+	uint16_t max_nb_desc; /**< The max num of descriptors. */
+};
+
+#define RTE_ETH_MAX_HAIRPIN_PEERS 32
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * A structure used to hold hairpin peer data.
+ */
+struct rte_eth_hairpin_peer {
+	uint16_t port; /**< Peer port. */
+	uint16_t queue; /**< Peer queue. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * A structure used to configure hairpin binding.
+ */
+struct rte_eth_hairpin_conf {
+	uint16_t peer_n; /**< The number of peers. */
+	struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
+};
+
+/**
  * A structure contains information about HW descriptor ring limitations.
  */
 struct rte_eth_desc_lim {
@@ -1080,6 +1120,8 @@ struct rte_eth_conf {
 /**< Device supports Rx queue setup after device started*/
 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
 /**< Device supports Tx queue setup after device started*/
+#define RTE_ETH_DEV_CAPA_HAIRPIN_SUPPORT 0x00000004
+/**< Device supports hairpin queues. */

 /*
  * If new Tx offload capabilities are defined, they also must be
@@ -1277,6 +1319,7 @@ struct rte_eth_dcb_info {
  */
 #define RTE_ETH_QUEUE_STATE_STOPPED 0
 #define RTE_ETH_QUEUE_STATE_STARTED 1
+#define RTE_ETH_QUEUE_STATE_HAIRPIN 2

 #define RTE_ETH_ALL RTE_MAX_ETHPORTS

@@ -1771,6 +1814,34 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		struct rte_mempool *mb_pool);

 /**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Allocate and set up a hairpin receive queue for an Ethernet device.
+ *
+ * The function sets up the selected queue to be used in hairpin.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ *   The index of the receive queue to set up.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param nb_rx_desc
+ *   The number of receive descriptors to allocate for the receive ring.
+ * @param conf
+ *   The pointer to the hairpin configuration.
+ * @return
+ *   - 0: Success, receive queue correctly set up.
+ *   - -EINVAL: Selected queue can't be configured for hairpin.
+ *   - -ENOMEM: Unable to allocate the resources required for the queue.
+ */
+__rte_experimental
+int rte_eth_rx_hairpin_queue_setup
+	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
+	 const struct rte_eth_hairpin_conf *conf);
+
+/**
  * Allocate and set up a transmit queue for an Ethernet device.
  *
  * @param port_id
@@ -1823,6 +1894,33 @@ int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		const struct rte_eth_txconf *tx_conf);

 /**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Allocate and set up a transmit hairpin queue for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ *   The index of the transmit queue to set up.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param nb_tx_desc
+ *   The number of transmit descriptors to allocate for the transmit ring.
+ * @param conf
+ *   The hairpin configuration.
+ *
+ * @return
+ *   - 0: Success, transmit queue correctly set up.
+ *   - -EINVAL: Selected queue can't be configured for hairpin.
+ *   - -ENOMEM: Unable to allocate the resources required for the queue.
+ */
+__rte_experimental
+int rte_eth_tx_hairpin_queue_setup
+	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
+	 const struct rte_eth_hairpin_conf *conf);
+
+/**
  * Return the NUMA socket to which an Ethernet device is connected
  *
  * @param port_id
@@ -4037,6 +4135,22 @@ int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
 void *
 rte_eth_dev_get_sec_ctx(uint16_t port_id);

+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Query the device hairpin capabilities.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param cap
+ *   Pointer to a structure that will hold the hairpin capabilities.
+ * @return
+ *   - 0 on success, -ENOTSUP if the device doesn't support hairpin.
+ */
+__rte_experimental
+int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
+				       struct rte_eth_hairpin_cap *cap);

 #include <rte_ethdev_core.h>

@@ -4137,6 +4251,12 @@ int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
 		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
 		return 0;
 	}
+	if (dev->data->rx_queue_state[queue_id] ==
+	    RTE_ETH_QUEUE_STATE_HAIRPIN) {
+		RTE_ETHDEV_LOG(ERR, "RX queue_id=%u is hairpin queue\n",
+			       queue_id);
+		return 0;
+	}
 #endif
 	nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
 				     rx_pkts, nb_pkts);
@@ -4403,6 +4523,12 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
 		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
 		return 0;
 	}
+	if (dev->data->tx_queue_state[queue_id] ==
+	    RTE_ETH_QUEUE_STATE_HAIRPIN) {
+		RTE_ETHDEV_LOG(ERR, "TX queue_id=%u is hairpin queue\n",
+			       queue_id);
+		return 0;
+	}
 #endif

 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
diff --git a/lib/librte_ethdev/rte_ethdev_core.h b/lib/librte_ethdev/rte_ethdev_core.h
index dcb5ae6..ef46e71 100644
--- a/lib/librte_ethdev/rte_ethdev_core.h
+++ b/lib/librte_ethdev/rte_ethdev_core.h
@@ -250,6 +250,12 @@ typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
 				    struct rte_mempool *mb_pool);
 /**< @internal Set up a receive queue of an Ethernet device. */

+typedef int (*eth_rx_hairpin_queue_setup_t)
+	(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+	 uint16_t nb_rx_desc,
+	 const struct rte_eth_hairpin_conf *conf);
+/**< @internal Set up a receive hairpin queue of an Ethernet device. */
+
 typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
 				    uint16_t tx_queue_id,
 				    uint16_t nb_tx_desc,
@@ -257,6 +263,12 @@ typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
 				    const struct rte_eth_txconf *tx_conf);
 /**< @internal Setup a transmit queue of an Ethernet device. */

+typedef int (*eth_tx_hairpin_queue_setup_t)
+	(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+	 uint16_t nb_tx_desc,
+	 const struct rte_eth_hairpin_conf *hairpin_conf);
+/**< @internal Setup a transmit hairpin queue of an Ethernet device. */
+
 typedef int (*eth_rx_enable_intr_t)(struct rte_eth_dev *dev,
 				    uint16_t rx_queue_id);
 /**< @internal Enable interrupt of a receive queue of an Ethernet device. */
@@ -505,6 +517,10 @@ typedef int (*eth_pool_ops_supported_t)(struct rte_eth_dev *dev,
 						const char *pool);
 /**< @internal Test if a port supports specific mempool ops */

+typedef int (*eth_hairpin_cap_get_t)(struct rte_eth_dev *dev,
+				     struct rte_eth_hairpin_cap *cap);
+/**< @internal Get the hairpin capabilities. */
+
 /**
  * @internal A structure containing the functions exported by an Ethernet driver.
  */
@@ -557,6 +573,8 @@ struct eth_dev_ops {
 	eth_queue_start_t          tx_queue_start;/**< Start TX for a queue. */
 	eth_queue_stop_t           tx_queue_stop; /**< Stop TX for a queue. */
 	eth_rx_queue_setup_t       rx_queue_setup;/**< Set up device RX queue. */
+	eth_rx_hairpin_queue_setup_t rx_hairpin_queue_setup;
+	/**< Set up device RX hairpin queue. */
 	eth_queue_release_t        rx_queue_release; /**< Release RX queue. */
 	eth_rx_queue_count_t       rx_queue_count;
 	/**< Get the number of used RX descriptors. */
@@ -568,6 +586,8 @@ struct eth_dev_ops {
 	eth_rx_enable_intr_t       rx_queue_intr_enable;  /**< Enable Rx queue interrupt. */
 	eth_rx_disable_intr_t      rx_queue_intr_disable; /**< Disable Rx queue interrupt. */
 	eth_tx_queue_setup_t       tx_queue_setup;/**< Set up device TX queue. */
+	eth_tx_hairpin_queue_setup_t tx_hairpin_queue_setup;
+	/**< Set up device TX hairpin queue. */
 	eth_queue_release_t        tx_queue_release; /**< Release TX queue. */
 	eth_tx_done_cleanup_t      tx_done_cleanup;/**< Free tx ring mbufs */
@@ -639,6 +659,9 @@ struct eth_dev_ops {

 	eth_pool_ops_supported_t pool_ops_supported;
 	/**< Test if a port supports specific mempool ops */
+
+	eth_hairpin_cap_get_t hairpin_cap_get;
+	/**< Returns the hairpin capabilities. */
 };

 /**
@@ -746,9 +769,9 @@ struct rte_eth_dev_data {
 		dev_started : 1,   /**< Device state: STARTED(1) / STOPPED(0). */
 		lro         : 1;   /**< RX LRO is ON(1) / OFF(0) */
 	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
-	/**< Queues state: STARTED(1) / STOPPED(0). */
+	/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
 	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
-	/**< Queues state: STARTED(1) / STOPPED(0). */
+	/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
 	uint32_t dev_flags;             /**< Capabilities. */
 	enum rte_kernel_driver kdrv;    /**< Kernel driver passthrough. */
 	int numa_node;                  /**< NUMA node connection. */
diff --git a/lib/librte_ethdev/rte_ethdev_version.map b/lib/librte_ethdev/rte_ethdev_version.map
index 6df42a4..77b0a86 100644
--- a/lib/librte_ethdev/rte_ethdev_version.map
+++ b/lib/librte_ethdev/rte_ethdev_version.map
@@ -283,4 +283,9 @@ EXPERIMENTAL {

 	# added in 19.08
 	rte_eth_read_clock;
+
+	# added in 19.11
+	rte_eth_rx_hairpin_queue_setup;
+	rte_eth_tx_hairpin_queue_setup;
+	rte_eth_dev_hairpin_capability_get;
 };
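
To complete the picture from the commit message ("create rte_flow with queue /
RSS action that targets one or more of the Rx queues"), here is an illustrative
sketch of that steering step. It is not part of the patch; the match-all
Ethernet pattern and the hairpin queue index are assumptions, and real
applications would match more specific traffic.

#include <rte_flow.h>

/* Illustrative only: direct all ingress traffic of a port into the given
 * hairpin Rx queue with an rte_flow QUEUE action. */
static struct rte_flow *
steer_to_hairpin_queue(uint16_t port_id, uint16_t hairpin_rxq)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = hairpin_rxq };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}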