@@ -470,6 +470,23 @@ typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+/**
+ * @internal Set Rx queue limit watermark.
+ * @see rte_eth_rx_lwm_set()
+ */
+typedef int (*eth_rx_queue_lwm_set_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint8_t lwm);
+
+/**
+ * @internal Query queue limit watermark event.
+ * @see rte_eth_rx_lwm_query()
+ */
+typedef int (*eth_rx_queue_lwm_query_t)(struct rte_eth_dev *dev,
+ uint16_t *rx_queue_id,
+ uint8_t *lwm);
+
/** @internal Setup a transmit queue of an Ethernet device. */
typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
@@ -1168,6 +1185,11 @@ struct eth_dev_ops {
/** Priority flow control queue configure */
priority_flow_ctrl_queue_config_t priority_flow_ctrl_queue_config;
+ /** Set Rx queue limit watermark. */
+ eth_rx_queue_lwm_set_t rx_queue_lwm_set;
+ /** Query Rx queue limit watermark event. */
+ eth_rx_queue_lwm_query_t rx_queue_lwm_query;
+
/** Set Unicast Table Array */
eth_uc_hash_table_set_t uc_hash_table_set;
/** Set Unicast hash bitmap */
@@ -4424,6 +4424,58 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
queue_idx, tx_rate));
}
+int rte_eth_rx_lwm_set(uint16_t port_id, uint16_t queue_id,
+ uint8_t lwm)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ /* Valid queue IDs are [0, max_rx_queues - 1], so reject
+ * queue_id == max_rx_queues as well (same bound as the
+ * rewind check in rte_eth_rx_lwm_query()).
+ */
+ if (queue_id >= dev_info.max_rx_queues) {
+ RTE_ETHDEV_LOG(ERR,
+ "Set queue LWM: port %u: invalid queue ID=%u.\n",
+ port_id, queue_id);
+ return -EINVAL;
+ }
+
+ /* LWM is a percentage of the Rx queue size: 0 disables
+ * monitoring, [1-99] sets a new watermark.
+ */
+ if (lwm > 99) {
+ RTE_ETHDEV_LOG(ERR,
+ "Set queue LWM: port %u: invalid LWM=%u, must be within [0, 99].\n",
+ port_id, lwm);
+ return -EINVAL;
+ }
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_lwm_set, -ENOTSUP);
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_lwm_set)(dev,
+ queue_id, lwm));
+}
+
+int rte_eth_rx_lwm_query(uint16_t port_id, uint16_t *queue_id,
+ uint8_t *lwm)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ /* Validate the parameter before querying device info:
+ * no point fetching dev_info just to fail on a NULL pointer.
+ */
+ if (queue_id == NULL)
+ return -EINVAL;
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ /* Rewind an out-of-range starting queue to 0 so the caller can
+ * scan all queues circularly with a simple increment between calls.
+ */
+ if (*queue_id >= dev_info.max_rx_queues)
+ *queue_id = 0;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_lwm_query, -ENOTSUP);
+ return eth_err(port_id, (*dev->dev_ops->rx_queue_lwm_query)(dev,
+ queue_id, lwm));
+}
+
RTE_INIT(eth_dev_init_fp_ops)
{
uint32_t i;
@@ -1931,6 +1931,14 @@ struct rte_eth_rxq_info {
uint8_t queue_state; /**< one of RTE_ETH_QUEUE_STATE_*. */
uint16_t nb_desc; /**< configured number of RXDs. */
uint16_t rx_buf_size; /**< hardware receive buffer size. */
+ /**
+ * Per-queue Rx limit watermark defined as percentage of Rx queue
+ * size. If Rx queue receives traffic higher than this percentage,
+ * the event RTE_ETH_EVENT_RX_LWM is triggered.
+ * Value 0 means watermark monitoring is disabled, no event is
+ * triggered.
+ */
+ uint8_t lwm;
} __rte_cache_min_aligned;
/**
@@ -3672,6 +3680,64 @@ int rte_eth_dev_get_vlan_offload(uint16_t port_id);
*/
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Set Rx queue based limit watermark.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue.
+ * @param lwm
+ * The limit watermark percentage of Rx queue size which describes
+ * the fullness of Rx queue. If the Rx queue fullness is above LWM,
+ * the device will trigger the event RTE_ETH_EVENT_RX_LWM.
+ * [1-99] to set a new LWM.
+ * 0 to disable watermark monitoring.
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if lwm is out of the [0, 99] range or queue_id is invalid.
+ * - -ENOTSUP if the driver does not support this operation.
+ * - negative if failed otherwise.
+ */
+__rte_experimental
+int rte_eth_rx_lwm_set(uint16_t port_id, uint16_t queue_id, uint8_t lwm);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Query Rx queue based limit watermark event.
+ * The function scans the port's Rx queues circularly, starting from
+ * *queue_id, and stops at the first queue with a pending LWM event
+ * or after all queues have been checked without finding one.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The API caller sets the starting Rx queue id in the pointer.
+ * If the queue_id is bigger than the maximum queue id of the port,
+ * it is rewound to 0 so that the application can keep calling
+ * this function to handle all pending LWM events in the queues
+ * with a simple increment between calls.
+ * If an Rx queue has a pending LWM event, the pointer is updated
+ * with this Rx queue id; otherwise this pointer's content is
+ * unchanged.
+ * @param lwm
+ * The pointer to the limit watermark percentage of Rx queue.
+ * If an Rx queue with a pending LWM event is found, the queue's LWM
+ * percentage is stored in this pointer, otherwise the pointer's
+ * content is unchanged.
+ *
+ * @return
+ * - 1 if an Rx queue with a pending LWM event is found.
+ * - 0 if no Rx queue with a pending LWM event is found.
+ * - -EINVAL if queue_id is NULL.
+ */
+__rte_experimental
+int rte_eth_rx_lwm_query(uint16_t port_id, uint16_t *queue_id,
+ uint8_t *lwm);
+
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
void *userdata);
@@ -3877,6 +3943,11 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_DESTROY, /**< port is released */
RTE_ETH_EVENT_IPSEC, /**< IPsec offload related event */
RTE_ETH_EVENT_FLOW_AGED,/**< New aged-out flows is detected */
+ /**
+ * Watermark value is exceeded in a queue.
+ * @see rte_eth_rx_lwm_set()
+ */
+ RTE_ETH_EVENT_RX_LWM,
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
@@ -285,6 +285,8 @@ EXPERIMENTAL {
rte_mtr_color_in_protocol_priority_get;
rte_mtr_color_in_protocol_set;
rte_mtr_meter_vlan_table_update;
+ rte_eth_rx_lwm_query;
+ rte_eth_rx_lwm_set;
};
INTERNAL {