@@ -275,6 +275,8 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(PROG,
sizeof(struct rte_flow_action_prog)),
MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
+ MK_FLOW_ACTION(JUMP_TO_TABLE, sizeof(struct rte_flow_action_jump_to_table)),
+ MK_FLOW_ACTION(JUMP_TO_TABLE_INDEX, sizeof(struct rte_flow_action_jump_to_table_index)),
};
int
@@ -2109,6 +2111,43 @@ rte_flow_async_create_by_index(uint16_t port_id,
user_data, error);
}
+struct rte_flow *
+rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_template_table *template_table,
+ uint32_t rule_index,
+ const struct rte_flow_item pattern[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_FLOW_DEBUG
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+ if (dev->flow_fp_ops == NULL ||
+ dev->flow_fp_ops->async_create_by_index_with_pattern == NULL) {
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+ }
+#endif
+
+ return dev->flow_fp_ops->async_create_by_index_with_pattern(dev, queue_id,
+ op_attr, template_table,
+ rule_index,
+ pattern, pattern_template_index,
+ actions, actions_template_index,
+ user_data, error);
+}
+
int
rte_flow_async_destroy(uint16_t port_id,
uint32_t queue_id,
@@ -2733,6 +2772,24 @@ rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
return NULL;
}
+static struct rte_flow *
+rte_flow_dummy_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue __rte_unused,
+ const struct rte_flow_op_attr *attr __rte_unused,
+ struct rte_flow_template_table *table __rte_unused,
+ uint32_t rule_index __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ uint8_t pattern_template_index __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ uint8_t action_template_index __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+}
+
static int
rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
uint32_t queue_id __rte_unused,
@@ -2898,6 +2955,7 @@ rte_flow_dummy_async_action_list_handle_query_update(
struct rte_flow_fp_ops rte_flow_fp_default_ops = {
.async_create = rte_flow_dummy_async_create,
.async_create_by_index = rte_flow_dummy_async_create_by_index,
+ .async_create_by_index_with_pattern = rte_flow_dummy_async_create_by_index_with_pattern,
.async_actions_update = rte_flow_dummy_async_actions_update,
.async_destroy = rte_flow_dummy_async_destroy,
.push = rte_flow_dummy_push,
@@ -3262,6 +3262,24 @@ enum rte_flow_action_type {
* @see struct rte_flow_action_nat64
*/
RTE_FLOW_ACTION_TYPE_NAT64,
+
+ /**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE
+ *
+ * Redirects packets to a particular flow table.
+ *
+ * @see struct rte_flow_action_jump_to_table
+ */
+ RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE,
+
+ /**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX
+ *
+ * Redirects packets to a particular index in a flow table.
+ *
+ * @see struct rte_flow_action_jump_to_table_index
+ */
+ RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX,
};
/**
@@ -4266,6 +4284,25 @@ rte_flow_dynf_metadata_set(struct rte_mbuf *m, uint32_t v)
*RTE_FLOW_DYNF_METADATA(m) = v;
}
+/**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE
+ *
+ * Redirects packets to a particular flow table.
+ */
+struct rte_flow_action_jump_to_table {
+ struct rte_flow_template_table *table; /**< Table to redirect packets to. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX
+ *
+ * Redirects packets to a particular index in a flow table.
+ */
+struct rte_flow_action_jump_to_table_index {
+ struct rte_flow_template_table *table; /**< Table to redirect packets to. */
+ uint32_t index; /**< Rule index within the table. */
+};
+
/**
* Definition of a single action.
*
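For illustration, a minimal usage sketch of the new action configuration; the `tbl` handle and the index value are placeholder assumptions for this example, not part of the patch (the plain JUMP_TO_TABLE variant is configured the same way, minus the index):

    /* Sketch: redirect matching packets to rule slot 42 of another
     * template table. `tbl` is assumed to be a handle obtained earlier
     * from rte_flow_template_table_create(). */
    struct rte_flow_action_jump_to_table_index jump_idx = {
            .table = tbl,
            .index = 42,
    };
    const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX, .conf = &jump_idx },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };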
@@ -5898,6 +5935,10 @@ enum rte_flow_table_insertion_type {
* Index-based insertion.
*/
RTE_FLOW_TABLE_INSERTION_TYPE_INDEX,
+ /**
+ * Index-based insertion with pattern.
+ */
+ RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN,
};
/**
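Presumably a template table must be created with this insertion type for rules to be inserted by index together with a pattern. A hedged sketch of the table attributes, using the field names of the existing template-table API (the capacity value is an arbitrary example):

    struct rte_flow_template_table_attr attr = {
            .flow_attr = { .ingress = 1 },
            .nb_flows = 1024, /* table capacity; presumably also bounds valid rule_index values */
            .insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN,
    };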
@@ -6183,6 +6224,59 @@ rte_flow_async_create_by_index(uint16_t port_id,
void *user_data,
struct rte_flow_error *error);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue rule creation by index with pattern operation.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param queue_id
+ * Flow queue used to insert the rule.
+ * @param[in] op_attr
+ * Rule creation operation attributes.
+ * @param[in] template_table
+ * Template table to select templates from.
+ * @param[in] rule_index
+ * Rule index in the table.
+ * Inserting a rule at an already occupied index results in undefined behavior.
+ * @param[in] pattern
+ * List of pattern items to be used.
+ * The list order should match the order in the pattern template.
+ * The spec is the only relevant member of the item that is being used.
+ * @param[in] pattern_template_index
+ * Pattern template index in the table.
+ * @param[in] actions
+ * List of actions to be used.
+ * The list order should match the order in the actions template.
+ * @param[in] actions_template_index
+ * Actions template index in the table.
+ * @param[in] user_data
+ * The user data that will be returned on the completion events.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * PMDs initialize this structure in case of error only.
+ *
+ * @return
+ * Handle on success, NULL otherwise and rte_errno is set.
+ * The rule handle doesn't mean that the rule has been populated;
+ * only the completion result indicates success or failure.
+ */
+__rte_experimental
+struct rte_flow *
+rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_template_table *template_table,
+ uint32_t rule_index,
+ const struct rte_flow_item pattern[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
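A hedged call-site sketch for the new function; `port_id`, `queue_id`, `op_attr`, `tbl`, `pattern` and `actions` are assumed to have been set up beforehand, and the rule index and template indices are arbitrary example values:

    struct rte_flow_error err;
    struct rte_flow *handle;

    handle = rte_flow_async_create_by_index_with_pattern(port_id, queue_id,
                    &op_attr, tbl, 5 /* rule_index */,
                    pattern, 0 /* pattern template index */,
                    actions, 0 /* actions template index */,
                    NULL /* user_data */, &err);
    if (handle == NULL)
            printf("enqueue failed: %s\n", err.message ? err.message : "n/a");
    /* Even with a non-NULL handle the rule is not guaranteed inserted yet;
     * drain completions with rte_flow_push() and rte_flow_pull(). */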
@@ -319,6 +319,19 @@ typedef struct rte_flow *(*rte_flow_async_create_by_index_t)(struct rte_eth_dev
void *user_data,
struct rte_flow_error *error);
+/** @internal Enqueue rule creation by index with pattern operation. */
+typedef struct rte_flow *(*rte_flow_async_create_by_index_with_pattern_t)(struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_item *items,
+ uint8_t pattern_template_index,
+ const struct rte_flow_action *actions,
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
/** @internal Enqueue rule update operation. */
typedef int (*rte_flow_async_actions_update_t)(struct rte_eth_dev *dev,
uint32_t queue_id,
@@ -435,6 +448,7 @@ typedef int (*rte_flow_async_action_list_handle_query_update_t)(
struct __rte_cache_aligned rte_flow_fp_ops {
rte_flow_async_create_t async_create;
rte_flow_async_create_by_index_t async_create_by_index;
+ rte_flow_async_create_by_index_with_pattern_t async_create_by_index_with_pattern;
rte_flow_async_actions_update_t async_actions_update;
rte_flow_async_destroy_t async_destroy;
rte_flow_push_t push;
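On the driver side, a PMD supplies its own implementation through the new member. A minimal hypothetical sketch follows; the `example_pmd_*` names and the stub behavior are assumptions, only the callback signature and the struct member come from this patch:

    /* Stub handler matching rte_flow_async_create_by_index_with_pattern_t.
     * A real driver would validate rule_index against the table size,
     * translate items/actions through the selected templates and enqueue
     * the hardware operation; this stub only reports ENOTSUP. */
    static struct rte_flow *
    example_pmd_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused,
    		uint32_t queue __rte_unused,
    		const struct rte_flow_op_attr *attr __rte_unused,
    		struct rte_flow_template_table *table __rte_unused,
    		uint32_t rule_index __rte_unused,
    		const struct rte_flow_item *items __rte_unused,
    		uint8_t pattern_template_index __rte_unused,
    		const struct rte_flow_action *actions __rte_unused,
    		uint8_t action_template_index __rte_unused,
    		void *user_data __rte_unused,
    		struct rte_flow_error *error)
    {
    	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
    			   NULL, rte_strerror(ENOTSUP));
    	return NULL; /* stub: nothing enqueued */
    }

    static struct rte_flow_fp_ops example_pmd_fp_ops = {
    	.async_create_by_index_with_pattern =
    		example_pmd_async_create_by_index_with_pattern,
    	/* remaining callbacks omitted in this sketch */
    };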
@@ -325,6 +325,9 @@ EXPERIMENTAL {
rte_flow_template_table_resizable;
rte_flow_template_table_resize;
rte_flow_template_table_resize_complete;
+
+ # added in 24.11
+ rte_flow_async_create_by_index_with_pattern;
};
INTERNAL {