@@ -379,9 +379,12 @@ Flow control
Supports configuring link flow control.
* **[implements] eth_dev_ops**: ``flow_ctrl_get``, ``flow_ctrl_set``,
- ``priority_flow_ctrl_set``.
+ ``priority_flow_ctrl_set``, ``priority_flow_ctrl_queue_info_get``,
+  ``priority_flow_ctrl_queue_configure``.
* **[related] API**: ``rte_eth_dev_flow_ctrl_get()``, ``rte_eth_dev_flow_ctrl_set()``,
- ``rte_eth_dev_priority_flow_ctrl_set()``.
+ ``rte_eth_dev_priority_flow_ctrl_set()``,
+ ``rte_eth_dev_priority_flow_ctrl_queue_info_get()``,
+ ``rte_eth_dev_priority_flow_ctrl_queue_configure()``.
.. _nic_features_rate_limitation:
@@ -55,6 +55,11 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added APIs to enable queue based priority flow control (PFC).**
+
+  New APIs, ``rte_eth_dev_priority_flow_ctrl_queue_info_get()`` and
+  ``rte_eth_dev_priority_flow_ctrl_queue_configure()``, were added.
+
* **Updated Cisco enic driver.**
* Added rte_flow support for matching GENEVE packets.
@@ -533,6 +533,13 @@ typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
typedef int (*priority_flow_ctrl_set_t)(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
+/** @internal Get info for queue based PFC on an Ethernet device. */
+typedef int (*priority_flow_ctrl_queue_info_get_t)(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_queue_info *pfc_queue_info);
+/** @internal Configure queue based PFC parameter on an Ethernet device. */
+typedef int (*priority_flow_ctrl_queue_config_t)(struct rte_eth_dev *dev,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf);
+
/** @internal Update RSS redirection table on an Ethernet device. */
typedef int (*reta_update_t)(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -1080,6 +1087,10 @@ struct eth_dev_ops {
flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control */
/** Setup priority flow control */
priority_flow_ctrl_set_t priority_flow_ctrl_set;
+ /** Priority flow control queue info get */
+ priority_flow_ctrl_queue_info_get_t priority_flow_ctrl_queue_info_get;
+ /** Priority flow control queue configure */
+ priority_flow_ctrl_queue_config_t priority_flow_ctrl_queue_config;
/** Set Unicast Table Array */
eth_uc_hash_table_set_t uc_hash_table_set;
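For a PMD, the two dev_ops added above are ordinary callbacks wired into the driver's eth_dev_ops table. Below is a minimal sketch, assuming a hypothetical "foo" driver whose hardware exposes 8 traffic classes and supports both Rx and Tx pause; the foo_* names are illustrative only and not part of this patch, and ethdev_driver.h is assumed to be included:

static int
foo_pfc_queue_info_get(struct rte_eth_dev *dev,
		       struct rte_eth_pfc_queue_info *pfc_queue_info)
{
	RTE_SET_USED(dev);
	/* Hypothetical capabilities: 8 TCs, both pause directions. */
	pfc_queue_info->tc_max = 8;
	pfc_queue_info->mode_capa = RTE_ETH_FC_FULL;
	return 0;
}

static int
foo_pfc_queue_config(struct rte_eth_dev *dev,
		     struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	/* Program the hardware from pfc_queue_conf->mode,
	 * ->rx_pause and ->tx_pause here.
	 */
	RTE_SET_USED(dev);
	RTE_SET_USED(pfc_queue_conf);
	return 0;
}

static const struct eth_dev_ops foo_eth_dev_ops = {
	/* ... other callbacks ... */
	.priority_flow_ctrl_queue_info_get = foo_pfc_queue_info_get,
	.priority_flow_ctrl_queue_config = foo_pfc_queue_config,
};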
@@ -4022,6 +4022,149 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
return -ENOTSUP;
}
+static int
+validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
+{
+ if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
+ (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
+ if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
+ pfc_queue_conf->rx_pause.tx_qid,
+ dev_info->nb_tx_queues);
+ return -EINVAL;
+ }
+
+ if (pfc_queue_conf->rx_pause.tc >= tc_max) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC TC not in range for Rx pause requested:%d max:%d\n",
+ pfc_queue_conf->rx_pause.tc, tc_max);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
+{
+ if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
+ (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
+ if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
+ pfc_queue_conf->tx_pause.rx_qid,
+ dev_info->nb_rx_queues);
+ return -EINVAL;
+ }
+
+ if (pfc_queue_conf->tx_pause.tc >= tc_max) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC TC not in range for Tx pause requested:%d max:%d\n",
+ pfc_queue_conf->tx_pause.tc, tc_max);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
+ struct rte_eth_pfc_queue_info *pfc_queue_info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (pfc_queue_info == NULL) {
+ RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
+ return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
+ (dev, pfc_queue_info));
+ return -ENOTSUP;
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
+{
+ struct rte_eth_pfc_queue_info pfc_info;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (pfc_queue_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
+ if (ret != 0)
+ return ret;
+
+ if (pfc_info.tc_max == 0) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
+ port_id);
+ return -ENOTSUP;
+ }
+
+	/* Check whether the requested flow control mode is supported */
+ if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
+ pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
+		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
+ pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
+		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ /* Validate Rx pause parameters */
+ if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
+ pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
+ ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
+ pfc_queue_conf);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Validate Tx pause parameters */
+ if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
+ pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
+ ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
+ pfc_queue_conf);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (*dev->dev_ops->priority_flow_ctrl_queue_config)
+ return eth_err(port_id,
+ (*dev->dev_ops->priority_flow_ctrl_queue_config)(
+ dev, pfc_queue_conf));
+ return -ENOTSUP;
+}
+
static int
eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
@@ -1408,6 +1408,59 @@ struct rte_eth_pfc_conf {
uint8_t priority; /**< VLAN User Priority. */
};
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * A structure used to retrieve information about queue based PFC.
+ */
+struct rte_eth_pfc_queue_info {
+ /**
+ * Maximum supported traffic class as per PFC (802.1Qbb) specification.
+ */
+ uint8_t tc_max;
+ /** PFC queue mode capabilities. */
+ enum rte_eth_fc_mode mode_capa;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * A structure used to configure Ethernet priority flow control parameters for
+ * ethdev queues.
+ *
+ * The rte_eth_pfc_queue_conf::rx_pause structure shall be used to configure a
+ * given tx_qid with its corresponding tc. When the ethdev device receives a
+ * PFC frame with rte_eth_pfc_queue_conf::rx_pause::tc, traffic will be paused
+ * on rte_eth_pfc_queue_conf::rx_pause::tx_qid for that tc.
+ *
+ * The rte_eth_pfc_queue_conf::tx_pause structure shall be used to configure a
+ * given rx_qid. When rx_qid is congested, PFC frames are generated with
+ * rte_eth_pfc_queue_conf::tx_pause::tc and
+ * rte_eth_pfc_queue_conf::tx_pause::pause_time to the peer.
+ */
+struct rte_eth_pfc_queue_conf {
+ enum rte_eth_fc_mode mode; /**< Link flow control mode */
+
+ struct {
+ uint16_t tx_qid; /**< Tx queue ID */
+		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
+		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1].
+		 */
+ uint8_t tc;
+ } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
+
+ struct {
+ uint16_t pause_time; /**< Pause quota in the Pause frame */
+ uint16_t rx_qid; /**< Rx queue ID */
+		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
+		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1].
+		 */
+ uint8_t tc;
+ } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
+};
+
/**
* Tunnel type for device-specific classifier configuration.
* @see rte_eth_udp_tunnel
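To make the rx_pause/tx_pause split in the structure above concrete, here is a minimal sketch of filling rte_eth_pfc_queue_conf for a port operating in RTE_ETH_FC_FULL mode; the queue IDs, TC and pause time are arbitrary illustrative values, not defaults of the API:

struct rte_eth_pfc_queue_conf pfc_conf = {
	.mode = RTE_ETH_FC_FULL,
	/* Pause Tx queue 3 when a PFC frame for TC 2 arrives. */
	.rx_pause = { .tx_qid = 3, .tc = 2 },
	/* Generate PFC frames for TC 2 when Rx queue 3 is congested. */
	.tx_pause = { .pause_time = 0xFFFF, .rx_qid = 3, .tc = 2 },
};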
@@ -4158,6 +4211,55 @@ int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
uint32_t pool);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the information for queue based PFC.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param pfc_queue_info
+ * A pointer to a structure of type *rte_eth_pfc_queue_info* to be filled with
+ * the information about queue based PFC.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if support for priority_flow_ctrl_queue_info_get does not exist.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+__rte_experimental
+int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
+ struct rte_eth_pfc_queue_info *pfc_queue_info);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure queue based priority flow control for a given queue
+ * on an Ethernet device.
+ *
+ * @note When an ethdev port switches to queue based PFC mode, queues that are
+ * not explicitly configured shall be set up by the driver with default values,
+ * such as a lower priority value for the TC.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param pfc_queue_conf
+ * The pointer to the structure of the priority flow control parameters
+ * for the queue.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support queue based PFC mode.
+ * - (-ENODEV) if *port_id* invalid.
+ *   - (-EINVAL) if bad parameter.
+ *   - (-EIO) if the flow control queue setup fails on the device.
+ */
+__rte_experimental
+int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf);
+
/**
* Remove a MAC address from the internal array of addresses.
*
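Putting the two prototypes above together, a possible application-side sequence queries the capabilities first and then configures one queue per traffic class. The function name, the 1:1 TC-to-queue mapping and the pause time below are assumptions for illustration only; rte_ethdev.h and string.h are assumed to be included, and the Rx/Tx queues are assumed to be already configured:

static int
setup_queue_pfc(uint16_t port_id)
{
	struct rte_eth_pfc_queue_info info;
	struct rte_eth_pfc_queue_conf conf;
	uint8_t tc;
	int ret;

	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &info);
	if (ret != 0)
		return ret; /* e.g. -ENOTSUP if the PMD lacks queue based PFC */

	/* This example requires full (Rx and Tx pause) support. */
	if (info.tc_max == 0 || info.mode_capa != RTE_ETH_FC_FULL)
		return -ENOTSUP;

	/* Map TC n to Tx/Rx queue n; a real application would use its
	 * own queue layout.
	 */
	for (tc = 0; tc < info.tc_max; tc++) {
		memset(&conf, 0, sizeof(conf));
		conf.mode = RTE_ETH_FC_FULL;
		conf.rx_pause.tx_qid = tc;
		conf.rx_pause.tc = tc;
		conf.tx_pause.rx_qid = tc;
		conf.tx_pause.tc = tc;
		conf.tx_pause.pause_time = 0xFFFF;

		ret = rte_eth_dev_priority_flow_ctrl_queue_configure(port_id,
								     &conf);
		if (ret != 0)
			return ret;
	}
	return 0;
}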
@@ -256,6 +256,10 @@ EXPERIMENTAL {
rte_flow_flex_item_create;
rte_flow_flex_item_release;
rte_flow_pick_transfer_proxy;
+
+ # added in 22.03
+ rte_eth_dev_priority_flow_ctrl_queue_configure;
+ rte_eth_dev_priority_flow_ctrl_queue_info_get;
};
INTERNAL {