@@ -879,8 +879,11 @@ dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
dlb2_iface_domain_reset(dlb2);
/* Free all dynamically allocated port memory */
- for (i = 0; i < dlb2->num_ports; i++)
+ for (i = 0; i < dlb2->num_ports; i++) {
dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
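+		/* On a full reset (not a reconfiguration), clear the rest of
+		 * the ev_port state as well.
+		 */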
+ if (!reconfig)
+ memset(&dlb2->ev_ports[i], 0, sizeof(struct dlb2_eventdev_port));
+ }
/* If reconfiguring, mark the device's queues and ports as "previously
* configured." If the user doesn't reconfigure them, the PMD will
@@ -1525,7 +1528,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
struct dlb2_hw_dev *handle = &dlb2->qm_instance;
struct dlb2_create_ldb_port_args cfg = { {0} };
int ret;
- struct dlb2_port *qm_port = NULL;
+ struct dlb2_port *qm_port = &ev_port->qm_port;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t qm_port_id;
uint16_t ldb_credit_high_watermark = 0;
@@ -1554,6 +1557,11 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
cfg.cq_depth = rte_align32pow2(dequeue_depth);
cfg.cq_depth_threshold = 1;
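+	/* If inflight control was requested via rte_pmd_dlb2_set_port_param()
+	 * before port setup, carry it into the port-create arguments
+	 * (DLB 2.5 only).
+	 */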
+ if (dlb2->version == DLB2_HW_V2_5 && qm_port->enable_inflight_ctrl) {
+ cfg.enable_inflight_ctrl = 1;
+ cfg.inflight_threshold = qm_port->inflight_threshold;
+ }
+
cfg.cq_history_list_size = cfg.cq_depth;
cfg.cos_id = ev_port->cos_id;
@@ -4321,6 +4329,52 @@ dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
return dlb2_get_ldb_queue_depth(dlb2, queue);
}
+int
+dlb2_set_port_param(struct dlb2_eventdev *dlb2,
+ int port_id,
+ uint64_t param_flags,
+ void *param_val)
+{
+ struct dlb2_port_param *port_param = (struct dlb2_port_param *)param_val;
+ struct dlb2_port *port = &dlb2->ev_ports[port_id].qm_port;
+ struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+ int ret = 0, bit = 0;
+
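+	/* Scan the flag bits one at a time; rte_bit_relaxed_test_and_clear64()
+	 * returns the tested bit's mask (zero if it was clear), so each set
+	 * flag is dispatched exactly once.
+	 */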
+ while (param_flags) {
+ uint64_t param = rte_bit_relaxed_test_and_clear64(bit++, &param_flags);
+
+ if (!param)
+ continue;
+ switch (param) {
+ case DLB2_FLOW_MIGRATION_THRESHOLD:
+ if (dlb2->version == DLB2_HW_V2_5) {
+ struct dlb2_cq_inflight_ctrl_args args = {0};
+ args.enable = true;
+ args.port_id = port->id;
+ args.threshold = port_param->inflight_threshold;
+
+ if (dlb2->ev_ports[port_id].setup_done)
+ ret = dlb2_iface_set_cq_inflight_ctrl(handle, &args);
+ if (ret < 0) {
+ DLB2_LOG_ERR("dlb2: can not set port parameters\n");
+ return -EINVAL;
+ }
+ port->enable_inflight_ctrl = true;
+ port->inflight_threshold = args.threshold;
+ } else {
+ DLB2_LOG_ERR("dlb2: FLOW_MIGRATION_THRESHOLD is only supported for 2.5 HW\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DLB2_LOG_ERR("dlb2: Unsupported flag\n");
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
static bool
dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_queue *queue)
@@ -77,5 +77,8 @@ int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,
int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle,
struct dlb2_enable_cq_weight_args *args);
+int (*dlb2_iface_set_cq_inflight_ctrl)(struct dlb2_hw_dev *handle,
+ struct dlb2_cq_inflight_ctrl_args *args);
+
int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle,
struct dlb2_set_cos_bw_args *args);
@@ -72,10 +72,12 @@ extern int (*dlb2_iface_get_ldb_queue_depth)(struct dlb2_hw_dev *handle,
extern int (*dlb2_iface_get_dir_queue_depth)(struct dlb2_hw_dev *handle,
struct dlb2_get_dir_queue_depth_args *args);
-
extern int (*dlb2_iface_enable_cq_weight)(struct dlb2_hw_dev *handle,
struct dlb2_enable_cq_weight_args *args);
+extern int (*dlb2_iface_set_cq_inflight_ctrl)(struct dlb2_hw_dev *handle,
+ struct dlb2_cq_inflight_ctrl_args *args);
+
extern int (*dlb2_iface_set_cos_bw)(struct dlb2_hw_dev *handle,
struct dlb2_set_cos_bw_args *args);
@@ -389,6 +389,8 @@ struct dlb2_port {
bool use_avx512;
uint32_t cq_weight;
bool is_producer; /* True if port is of type producer */
+ uint16_t inflight_threshold; /* DLB2.5 HW inflight threshold */
+ bool enable_inflight_ctrl; /* DLB2.5 enable HW inflight control */
};
/* Per-process per-port mmio and memory pointers */
@@ -718,6 +720,9 @@ int dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_queue *queue);
+int dlb2_set_port_param(struct dlb2_eventdev *dlb2, int port_id,
+ uint64_t flags, void *val);
+
int dlb2_parse_params(const char *params,
const char *name,
struct dlb2_devargs *dlb2_args,
@@ -472,6 +472,8 @@ struct dlb2_create_ldb_port_args {
__u16 cq_history_list_size;
__u8 cos_id;
__u8 cos_strict;
+ __u8 enable_inflight_ctrl;
+ __u16 inflight_threshold;
};
/*
@@ -717,6 +719,28 @@ struct dlb2_enable_cq_weight_args {
__u32 limit;
};
+/*
+ * DLB2_DOMAIN_CMD_SET_CQ_INFLIGHT_CTRL: Set per-CQ inflight control for
+ * {ATM,UNO,ORD} QEs.
+ *
+ * Input parameters:
+ * - port_id: Load-balanced port ID.
+ * - enable: True if inflight control is enabled, false otherwise.
+ * - threshold: Per CQ inflight threshold.
+ *
+ * Output parameters:
+ * - response.status: Detailed error code. In certain cases, such as if the
+ * ioctl request arg is invalid, the driver won't set status.
+ */
+struct dlb2_cq_inflight_ctrl_args {
+ /* Output parameters */
+ struct dlb2_cmd_response response;
+ /* Input parameters */
+ __u32 port_id;
+ __u16 enable;
+ __u16 threshold;
+};
+
/*
* Mapping sizes for memory mapping the consumer queue (CQ) memory space, and
* producer port (PP) MMIO space.
@@ -3238,6 +3238,15 @@
#define DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT_LOC 0
#define DLB2_LSP_CQ_LDB_INFL_LIM_RSVD0_LOC 12
+#define DLB2_LSP_CQ_LDB_INFL_THRESH(x) \
+ (0x90580000 + (x) * 0x1000)
+#define DLB2_LSP_CQ_LDB_INFL_THRESH_RST 0x0
+
+#define DLB2_LSP_CQ_LDB_INFL_THRESH_THRESH 0x00000FFF
+#define DLB2_LSP_CQ_LDB_INFL_THRESH_RSVD0 0xFFFFF000
+#define DLB2_LSP_CQ_LDB_INFL_THRESH_THRESH_LOC 0
+#define DLB2_LSP_CQ_LDB_INFL_THRESH_RSVD0_LOC 12
+
#define DLB2_V2LSP_CQ_LDB_TKN_CNT(x) \
(0xa0580000 + (x) * 0x1000)
#define DLB2_V2_5LSP_CQ_LDB_TKN_CNT(x) \
@@ -3062,10 +3062,14 @@ static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
DLB2_CHP_LDB_CQ_DEPTH_RST);
- if (hw->ver != DLB2_HW_V2)
+ if (hw->ver != DLB2_HW_V2) {
DLB2_CSR_WR(hw,
DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_INFL_THRESH(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_INFL_THRESH_RST);
+ }
DLB2_CSR_WR(hw,
DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
@@ -4446,6 +4450,20 @@ static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
reg = 0;
DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
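+	/* On v2.5 devices, program the inflight control enable bit (a general
+	 * LSP configuration register) and, when enabled, the per-CQ inflight
+	 * threshold.
+	 */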
+ if (hw->ver == DLB2_HW_V2_5) {
+ reg = 0;
+ DLB2_BITS_SET(reg, args->enable_inflight_ctrl,
+ DLB2_LSP_CFG_CTRL_GENERAL_0_ENAB_IF_THRESH_V2_5);
+ DLB2_CSR_WR(hw, DLB2_V2_5LSP_CFG_CTRL_GENERAL_0, reg);
+
+ if (args->enable_inflight_ctrl) {
+ reg = 0;
+ DLB2_BITS_SET(reg, args->inflight_threshold,
+ DLB2_LSP_CQ_LDB_INFL_THRESH_THRESH);
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_THRESH(port->id.phys_id), reg);
+ }
+ }
+
return 0;
}
@@ -5464,6 +5482,35 @@ dlb2_get_domain_used_ldb_port(u32 id,
return NULL;
}
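+/* Unlike dlb2_get_domain_used_ldb_port(), this helper searches both the
+ * domain's in-use and still-available load-balanced port lists, so a port
+ * can be looked up whether or not it has been configured yet.
+ */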
+static struct dlb2_ldb_port *
+dlb2_get_domain_ldb_port(u32 id,
+ bool vdev_req,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter __attribute__((unused));
+ struct dlb2_ldb_port *port;
+ int i;
+
+ if (id >= DLB2_MAX_NUM_LDB_PORTS)
+ return NULL;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ if ((!vdev_req && port->id.phys_id == id) ||
+ (vdev_req && port->id.virt_id == id))
+ return port;
+ }
+
+ DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter) {
+ if ((!vdev_req && port->id.phys_id == id) ||
+ (vdev_req && port->id.virt_id == id))
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
struct dlb2_ldb_port *port,
int slot,
@@ -6816,3 +6863,49 @@ int dlb2_hw_set_cos_bandwidth(struct dlb2_hw *hw, u32 cos_id, u8 bandwidth)
return 0;
}
+
+int dlb2_hw_set_cq_inflight_ctrl(struct dlb2_hw *hw, u32 domain_id,
+ struct dlb2_cq_inflight_ctrl_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_hw_domain *domain;
+ struct dlb2_ldb_port *port;
+ u32 reg = 0;
+ int id;
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+ if (!domain) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: domain not found\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ id = args->port_id;
+
+ port = dlb2_get_domain_ldb_port(id, vdev_req, domain);
+ if (!port) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port not found\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
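+	/* The enable bit lives in the general LSP control register; the
+	 * threshold itself is programmed in the per-CQ threshold register.
+	 */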
+ DLB2_BITS_SET(reg, args->enable,
+ DLB2_LSP_CFG_CTRL_GENERAL_0_ENAB_IF_THRESH_V2_5);
+ DLB2_CSR_WR(hw, DLB2_V2_5LSP_CFG_CTRL_GENERAL_0, reg);
+
+ if (args->enable) {
+ reg = 0;
+ DLB2_BITS_SET(reg, args->threshold,
+ DLB2_LSP_CQ_LDB_INFL_THRESH_THRESH);
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_THRESH(port->id.phys_id),
+ reg);
+ }
+
+ resp->status = 0;
+
+ return 0;
+}
@@ -1956,4 +1956,23 @@ int dlb2_hw_enable_cq_weight(struct dlb2_hw *hw,
bool vdev_request,
unsigned int vdev_id);
+/**
+ * This function configures the inflight control threshold for a CQ.
+ *
+ * This must be called after creating the port.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb2_error.
+ *
+ * Errors:
+ * EINVAL - The domain or port is not configured.
+ */
+int dlb2_hw_set_cq_inflight_ctrl(struct dlb2_hw *hw, u32 domain_id,
+ struct dlb2_cq_inflight_ctrl_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id);
+
#endif /* __DLB2_RESOURCE_H */
@@ -665,6 +665,26 @@ dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle,
return ret;
}
+static int
+dlb2_pf_set_cq_inflight_ctrl(struct dlb2_hw_dev *handle,
+ struct dlb2_cq_inflight_ctrl_args *args)
+{
+ struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+ struct dlb2_cmd_response response = {0};
+ int ret = 0;
+
+ DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+ ret = dlb2_hw_set_cq_inflight_ctrl(&dlb2_dev->hw, handle->domain_id,
+ args, &response, false, 0);
+ args->response = response;
+
+ DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
@@ -691,6 +711,7 @@ dlb2_pf_iface_fn_ptrs_init(void)
dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight;
dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth;
+ dlb2_iface_set_cq_inflight_ctrl = dlb2_pf_set_cq_inflight_ctrl;
}
/* PCI DEV HOOKS */
@@ -33,7 +33,36 @@ rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
if (port_id >= dlb2->num_ports || dlb2->ev_ports[port_id].setup_done)
return -EINVAL;
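+	/* On v2.5 hardware, delayed token pop is realized through CQ inflight
+	 * control with a threshold of one event, so fall back to auto pop.
+	 */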
+ if (dlb2->version == DLB2_HW_V2_5 && mode == DELAYED_POP) {
+ dlb2->ev_ports[port_id].qm_port.enable_inflight_ctrl = true;
+ dlb2->ev_ports[port_id].qm_port.inflight_threshold = 1;
+ mode = AUTO_POP;
+ }
+
dlb2->ev_ports[port_id].qm_port.token_pop_mode = mode;
return 0;
}
+
+int
+rte_pmd_dlb2_set_port_param(uint8_t dev_id,
+ uint8_t port_id,
+ uint64_t flags,
+ void *val)
+{
+ struct dlb2_eventdev *dlb2;
+ struct rte_eventdev *dev;
+
+ if (val == NULL)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ dlb2 = dlb2_pmd_priv(dev);
+
+ if (port_id >= dlb2->num_ports)
+ return -EINVAL;
+
+ return dlb2_set_port_param(dlb2, port_id, flags, val);
+}
@@ -67,6 +67,46 @@ rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
uint8_t port_id,
enum dlb2_token_pop_mode mode);
+/** Set inflight threshold for flow migration */
+#define DLB2_FLOW_MIGRATION_THRESHOLD RTE_BIT64(0)
+
+/** Set port history list */
+#define DLB2_SET_PORT_HL RTE_BIT64(1)
+
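+/* Port parameter values. The 12-bit threshold field matches the THRESH
+ * field of the v2.5 per-CQ inflight threshold register.
+ */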
+struct dlb2_port_param {
+ uint16_t inflight_threshold : 12;
+};
+
+/*!
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Configure various port parameters.
+ * This function must be called before calling rte_event_port_setup()
+ * for the port, but after calling rte_event_dev_configure().
+ *
+ * @param dev_id
+ * The identifier of the event device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param flags
+ * Bitmask of the parameters being set.
+ * @param val
+ * Structure containing the values of the parameters being set.
+ *
+ * @return
+ * - 0: Success
+ * - EINVAL: Invalid dev_id, port_id, or flags
+ * - EINVAL: The DLB2 is not configured, is already running, or the port is
+ *   already set up
+ */
+__rte_experimental
+int
+rte_pmd_dlb2_set_port_param(uint8_t dev_id,
+ uint8_t port_id,
+ uint64_t flags,
+ void *val);
+
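+/*
+ * Example usage (an illustrative sketch, not part of the driver): set a
+ * flow-migration threshold of 32 inflight events on port 0 of a DLB 2.5
+ * device, after rte_event_dev_configure() but before rte_event_port_setup():
+ *
+ *	struct dlb2_port_param param = { .inflight_threshold = 32 };
+ *	int ret;
+ *
+ *	ret = rte_pmd_dlb2_set_port_param(dev_id, 0,
+ *					  DLB2_FLOW_MIGRATION_THRESHOLD,
+ *					  &param);
+ *	if (ret < 0)
+ *		return ret;
+ */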
#ifdef __cplusplus
}
#endif
@@ -5,6 +5,9 @@ DPDK_24 {
EXPERIMENTAL {
global:
+ # added in 24.07
+ rte_pmd_dlb2_set_port_param;
+
# added in 20.11
rte_pmd_dlb2_set_token_pop_mode;
};