@@ -134,6 +134,7 @@ enum index {
/* Queue arguments. */
QUEUE_CREATE,
QUEUE_DESTROY,
+ QUEUE_FLOW_UPDATE_RESIZED,
QUEUE_UPDATE,
QUEUE_AGED,
QUEUE_INDIRECT_ACTION,
@@ -191,8 +192,12 @@ enum index {
/* Table arguments. */
TABLE_CREATE,
TABLE_DESTROY,
+ TABLE_RESIZE,
+ TABLE_RESIZE_COMPLETE,
TABLE_CREATE_ID,
TABLE_DESTROY_ID,
+ TABLE_RESIZE_ID,
+ TABLE_RESIZE_RULES_NUMBER,
TABLE_INSERTION_TYPE,
TABLE_INSERTION_TYPE_NAME,
TABLE_HASH_FUNC,
@@ -204,6 +209,7 @@ enum index {
TABLE_TRANSFER,
TABLE_TRANSFER_WIRE_ORIG,
TABLE_TRANSFER_VPORT_ORIG,
+ TABLE_RESIZABLE,
TABLE_RULES_NUMBER,
TABLE_PATTERN_TEMPLATE,
TABLE_ACTIONS_TEMPLATE,
@@ -1323,6 +1329,8 @@ static const enum index next_group_attr[] = {
static const enum index next_table_subcmd[] = {
TABLE_CREATE,
TABLE_DESTROY,
+ TABLE_RESIZE,
+ TABLE_RESIZE_COMPLETE,
ZERO,
};
@@ -1337,6 +1345,7 @@ static const enum index next_table_attr[] = {
TABLE_TRANSFER,
TABLE_TRANSFER_WIRE_ORIG,
TABLE_TRANSFER_VPORT_ORIG,
+ TABLE_RESIZABLE,
TABLE_RULES_NUMBER,
TABLE_PATTERN_TEMPLATE,
TABLE_ACTIONS_TEMPLATE,
@@ -1353,6 +1362,7 @@ static const enum index next_table_destroy_attr[] = {
static const enum index next_queue_subcmd[] = {
QUEUE_CREATE,
QUEUE_DESTROY,
+ QUEUE_FLOW_UPDATE_RESIZED,
QUEUE_UPDATE,
QUEUE_AGED,
QUEUE_INDIRECT_ACTION,
@@ -3344,6 +3354,19 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct buffer, port)),
.call = parse_table_destroy,
},
+	[TABLE_RESIZE] = {
+		.name = "resize",
+		.help = "resize template table",
+		.next = NEXT(NEXT_ENTRY(TABLE_RESIZE_ID)),
+		.call = parse_table,
+	},
+	[TABLE_RESIZE_COMPLETE] = {
+		.name = "resize_complete",
+		.help = "complete table resize",
+		.next = NEXT(NEXT_ENTRY(TABLE_DESTROY_ID)),
+		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
+		.call = parse_table_destroy,
+	},
/* Table arguments. */
[TABLE_CREATE_ID] = {
.name = "table_id",
@@ -3354,13 +3377,29 @@ static const struct token token_list[] = {
},
[TABLE_DESTROY_ID] = {
.name = "table",
- .help = "specify table id to destroy",
+ .help = "table id",
.next = NEXT(next_table_destroy_attr,
NEXT_ENTRY(COMMON_TABLE_ID)),
.args = ARGS(ARGS_ENTRY_PTR(struct buffer,
args.table_destroy.table_id)),
.call = parse_table_destroy,
},
+	[TABLE_RESIZE_ID] = {
+		.name = "table_resize_id",
+		.help = "table resize id",
+		.next = NEXT(NEXT_ENTRY(TABLE_RESIZE_RULES_NUMBER),
+			     NEXT_ENTRY(COMMON_TABLE_ID)),
+		.args = ARGS(ARGS_ENTRY(struct buffer, args.table.id)),
+		.call = parse_table,
+	},
+	[TABLE_RESIZE_RULES_NUMBER] = {
+		.name = "table_resize_rules_num",
+		.help = "table resize rules number",
+		.next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(COMMON_UNSIGNED)),
+		.args = ARGS(ARGS_ENTRY(struct buffer,
+					args.table.attr.nb_flows)),
+		.call = parse_table,
+	},
[TABLE_INSERTION_TYPE] = {
.name = "insertion_type",
.help = "specify insertion type",
@@ -3433,6 +3472,12 @@ static const struct token token_list[] = {
.next = NEXT(next_table_attr),
.call = parse_table,
},
+ [TABLE_RESIZABLE] = {
+ .name = "resizable",
+ .help = "set resizable attribute",
+ .next = NEXT(next_table_attr),
+ .call = parse_table,
+ },
[TABLE_RULES_NUMBER] = {
.name = "rules_number",
.help = "number of rules in table",
@@ -3525,6 +3570,14 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct buffer, queue)),
.call = parse_qo_destroy,
},
+ [QUEUE_FLOW_UPDATE_RESIZED] = {
+ .name = "update_resized",
+ .help = "update a flow after table resize",
+ .next = NEXT(NEXT_ENTRY(QUEUE_DESTROY_ID),
+ NEXT_ENTRY(COMMON_QUEUE_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, queue)),
+ .call = parse_qo_destroy,
+ },
[QUEUE_UPDATE] = {
.name = "update",
.help = "update a flow rule",
@@ -10334,6 +10387,7 @@ parse_table(struct context *ctx, const struct token *token,
}
switch (ctx->curr) {
case TABLE_CREATE:
+ case TABLE_RESIZE:
out->command = ctx->curr;
ctx->objdata = 0;
ctx->object = out;
@@ -10378,18 +10432,25 @@ parse_table(struct context *ctx, const struct token *token,
case TABLE_TRANSFER_WIRE_ORIG:
if (!out->args.table.attr.flow_attr.transfer)
return -1;
- out->args.table.attr.specialize = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG;
+ out->args.table.attr.specialize |= RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG;
return len;
case TABLE_TRANSFER_VPORT_ORIG:
if (!out->args.table.attr.flow_attr.transfer)
return -1;
- out->args.table.attr.specialize = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
+ out->args.table.attr.specialize |= RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
+ return len;
+ case TABLE_RESIZABLE:
+ out->args.table.attr.specialize |=
+ RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE_TABLE;
return len;
case TABLE_RULES_NUMBER:
ctx->objdata = 0;
ctx->object = out;
ctx->objmask = NULL;
return len;
+ case TABLE_RESIZE_ID:
+ case TABLE_RESIZE_RULES_NUMBER:
+ return len;
default:
return -1;
}
@@ -10411,7 +10472,8 @@ parse_table_destroy(struct context *ctx, const struct token *token,
if (!out)
return len;
if (!out->command || out->command == TABLE) {
- if (ctx->curr != TABLE_DESTROY)
+ if (ctx->curr != TABLE_DESTROY &&
+ ctx->curr != TABLE_RESIZE_COMPLETE)
return -1;
if (sizeof(*out) > size)
return -1;
@@ -10513,7 +10575,8 @@ parse_qo_destroy(struct context *ctx, const struct token *token,
if (!out)
return len;
if (!out->command || out->command == QUEUE) {
- if (ctx->curr != QUEUE_DESTROY)
+ if (ctx->curr != QUEUE_DESTROY &&
+ ctx->curr != QUEUE_FLOW_UPDATE_RESIZED)
return -1;
if (sizeof(*out) > size)
return -1;
@@ -12569,10 +12632,18 @@ cmd_flow_parsed(const struct buffer *in)
in->args.table_destroy.table_id_n,
in->args.table_destroy.table_id);
break;
+ case TABLE_RESIZE_COMPLETE:
+ port_flow_template_table_resize_complete
+ (in->port, in->args.table_destroy.table_id[0]);
+ break;
case GROUP_SET_MISS_ACTIONS:
port_queue_group_set_miss_actions(in->port, &in->args.vc.attr,
in->args.vc.actions);
break;
+ case TABLE_RESIZE:
+ port_flow_template_table_resize(in->port, in->args.table.id,
+ in->args.table.attr.nb_flows);
+ break;
case QUEUE_CREATE:
port_queue_flow_create(in->port, in->queue, in->postpone,
in->args.vc.table_id, in->args.vc.rule_id,
@@ -12584,6 +12655,11 @@ cmd_flow_parsed(const struct buffer *in)
in->args.destroy.rule_n,
in->args.destroy.rule);
break;
+ case QUEUE_FLOW_UPDATE_RESIZED:
+ port_queue_flow_update_resized(in->port, in->queue,
+ in->postpone,
+ in->args.destroy.rule[0]);
+ break;
case QUEUE_UPDATE:
port_queue_flow_update(in->port, in->queue, in->postpone,
in->args.vc.rule_id, in->args.vc.act_templ_id,
@@ -1403,6 +1403,19 @@ port_flow_new(const struct rte_flow_attr *attr,
return NULL;
}
+static struct port_flow *
+port_flow_locate(struct port_flow *flows_list, uint32_t flow_id)
+{
+ struct port_flow *pf = flows_list;
+
+ while (pf) {
+ if (pf->id == flow_id)
+ break;
+ pf = pf->next;
+ }
+ return pf;
+}
+
/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
@@ -1693,6 +1706,19 @@ table_alloc(uint32_t id, struct port_table **table,
return 0;
}
+static struct port_table *
+port_table_locate(struct port_table *tables_list, uint32_t table_id)
+{
+ struct port_table *pt = tables_list;
+
+ while (pt) {
+ if (pt->id == table_id)
+ break;
+ pt = pt->next;
+ }
+ return pt;
+}
+
/** Get info about flow management resources. */
int
port_flow_get_info(portid_t port_id)
@@ -2665,6 +2691,46 @@ port_flow_template_table_destroy(portid_t port_id,
return ret;
}
+int
+port_flow_template_table_resize_complete(portid_t port_id, uint32_t table_id)
+{
+ struct rte_port *port;
+ struct port_table *pt;
+ struct rte_flow_error error = { 0, };
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return -EINVAL;
+ port = &ports[port_id];
+ pt = port_table_locate(port->table_list, table_id);
+ if (!pt)
+ return -EINVAL;
+ ret = rte_flow_template_table_resize_complete(port_id,
+ pt->table, &error);
+ return !ret ? 0 : port_flow_complain(&error);
+}
+
+int
+port_flow_template_table_resize(portid_t port_id,
+ uint32_t table_id, uint32_t flows_num)
+{
+ struct rte_port *port;
+ struct port_table *pt;
+ struct rte_flow_error error = { 0, };
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return -EINVAL;
+ port = &ports[port_id];
+ pt = port_table_locate(port->table_list, table_id);
+ if (!pt)
+ return -EINVAL;
+ ret = rte_flow_template_table_resize(port_id, pt->table, flows_num, &error);
+ if (ret)
+ return port_flow_complain(&error);
+ return 0;
+}
+
/** Flush table */
int
port_flow_template_table_flush(portid_t port_id)
@@ -2805,6 +2871,42 @@ port_queue_flow_create(portid_t port_id, queueid_t queue_id,
return 0;
}
+int
+port_queue_flow_update_resized(portid_t port_id, queueid_t queue_id,
+ bool postpone, uint32_t flow_id)
+{
+ const struct rte_flow_op_attr op_attr = { .postpone = postpone };
+ struct rte_flow_error error = { 0, };
+ struct port_flow *pf;
+ struct rte_port *port;
+ struct queue_job *job;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return -EINVAL;
+ port = &ports[port_id];
+ if (queue_id >= port->queue_nb) {
+ printf("Queue #%u is invalid\n", queue_id);
+ return -EINVAL;
+ }
+ pf = port_flow_locate(port->flow_list, flow_id);
+ if (!pf)
+ return -EINVAL;
+ job = calloc(1, sizeof(*job));
+ if (!job)
+ return -ENOMEM;
+ job->type = QUEUE_JOB_TYPE_FLOW_TRANSFER;
+ job->pf = pf;
+ ret = rte_flow_async_update_resized(port_id, queue_id, &op_attr,
+ pf->flow, job, &error);
+ if (ret) {
+ free(job);
+ return port_flow_complain(&error);
+ }
+ return 0;
+}
+
/** Enqueue number of destroy flow rules operations. */
int
port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
@@ -110,6 +110,7 @@ enum {
enum {
QUEUE_JOB_TYPE_FLOW_CREATE,
QUEUE_JOB_TYPE_FLOW_DESTROY,
+ QUEUE_JOB_TYPE_FLOW_TRANSFER,
QUEUE_JOB_TYPE_FLOW_UPDATE,
QUEUE_JOB_TYPE_ACTION_CREATE,
QUEUE_JOB_TYPE_ACTION_DESTROY,
@@ -981,7 +982,12 @@ int port_flow_template_table_create(portid_t port_id, uint32_t id,
uint32_t nb_actions_templates, uint32_t *actions_templates);
int port_flow_template_table_destroy(portid_t port_id,
uint32_t n, const uint32_t *table);
+int port_queue_flow_update_resized(portid_t port_id, queueid_t queue_id,
+ bool postpone, uint32_t flow_id);
int port_flow_template_table_flush(portid_t port_id);
+int port_flow_template_table_resize_complete(portid_t port_id, uint32_t table_id);
+int port_flow_template_table_resize(portid_t port_id,
+ uint32_t table_id, uint32_t flows_num);
int port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr,
const struct rte_flow_action *actions);
int port_queue_flow_create(portid_t port_id, queueid_t queue_id,
@@ -55,6 +55,8 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added support for template API table resize.**
+
Removed Items
-------------
@@ -2984,12 +2984,21 @@ following sections.
- Create a table::
flow table {port_id} create
- [table_id {id}]
+ [table_id {id}] [resizable]
[group {group_id}] [priority {level}] [ingress] [egress] [transfer]
rules_number {number}
pattern_template {pattern_template_id}
actions_template {actions_template_id}
+- Resize a table::
+
+ flow template_table {port_id} resize
+ table_resize_id {id} table_resize_rules_num {number}
+
+- Complete table resize::
+
+ flow template_table {port_id} resize_complete table {table_id}
+
- Destroy a table::
flow table {port_id} destroy table {id} [...]
@@ -3010,6 +3019,10 @@ following sections.
pattern {item} [/ {item} [...]] / end
actions {action} [/ {action} [...]] / end
+- Enqueue flow update following table resize::
+
+   flow queue {port_id} update_resized {queue_id} rule {rule_id}
+
- Enqueue destruction of specific flow rules::
flow queue {port_id} destroy {queue_id}
@@ -2572,6 +2572,39 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_ptr(user_data);
rte_trace_point_emit_int(ret);
)
+
+RTE_TRACE_POINT_FP(
+ rte_flow_trace_template_table_resize,
+ RTE_TRACE_POINT_ARGS(uint16_t port_id,
+ struct rte_flow_template_table *table,
+ uint32_t nb_rules, int ret),
+ rte_trace_point_emit_u16(port_id);
+ rte_trace_point_emit_ptr(table);
+ rte_trace_point_emit_u32(nb_rules);
+ rte_trace_point_emit_int(ret);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_flow_trace_async_update_resized,
+ RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow *rule, void *user_data, int ret),
+ rte_trace_point_emit_u16(port_id);
+ rte_trace_point_emit_u32(queue);
+ rte_trace_point_emit_ptr(attr);
+ rte_trace_point_emit_ptr(rule);
+ rte_trace_point_emit_ptr(user_data);
+ rte_trace_point_emit_int(ret);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_flow_trace_table_resize_complete,
+ RTE_TRACE_POINT_ARGS(uint16_t port_id,
+ struct rte_flow_template_table *table, int ret),
+ rte_trace_point_emit_u16(port_id);
+ rte_trace_point_emit_ptr(table);
+ rte_trace_point_emit_int(ret);
+)
#ifdef __cplusplus
}
#endif
@@ -774,3 +774,12 @@ RTE_TRACE_POINT_REGISTER(rte_flow_trace_async_action_list_handle_destroy,
RTE_TRACE_POINT_REGISTER(rte_flow_trace_async_action_list_handle_query_update,
lib.ethdev.flow.async_action_list_handle_query_update)
+
+RTE_TRACE_POINT_REGISTER(rte_flow_trace_template_table_resize,
+ lib.ethdev.flow.template_table_resize)
+
+RTE_TRACE_POINT_REGISTER(rte_flow_trace_async_update_resized,
+ lib.ethdev.flow.async_update_resized)
+
+RTE_TRACE_POINT_REGISTER(rte_flow_trace_table_resize_complete,
+ lib.ethdev.flow.table_resize_complete)
@@ -2481,3 +2481,72 @@ rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table
hash, error);
return flow_err(port_id, ret, error);
}
+
+int
+rte_flow_template_table_resize(uint16_t port_id,
+ struct rte_flow_template_table *table,
+ uint32_t nb_rules,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_dev *dev;
+ const struct rte_flow_ops *ops;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ ops = rte_flow_ops_get(port_id, error);
+ if (!ops || !ops->flow_template_table_resize)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow_template_table_resize not supported");
+ dev = &rte_eth_devices[port_id];
+ ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
+ ret = flow_err(port_id, ret, error);
+ rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
+ return ret;
+}
+
+int
+rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
+			      const struct rte_flow_op_attr *attr,
+			      struct rte_flow *rule, void *user_data,
+			      struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_eth_dev *dev;
+	const struct rte_flow_ops *ops;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	ops = rte_flow_ops_get(port_id, error);
+	if (!ops || !ops->flow_update_resized)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "flow_update_resized not supported");
+	dev = &rte_eth_devices[port_id];
+	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
+	ret = flow_err(port_id, ret, error);
+	rte_flow_trace_async_update_resized(port_id, queue, attr,
+					    rule, user_data, ret);
+	return ret;
+}
+
+int
+rte_flow_template_table_resize_complete(uint16_t port_id,
+					struct rte_flow_template_table *table,
+					struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_eth_dev *dev;
+	const struct rte_flow_ops *ops;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	ops = rte_flow_ops_get(port_id, error);
+	if (!ops || !ops->flow_template_table_resize_complete)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "flow_template_table_resize_complete not supported");
+	dev = &rte_eth_devices[port_id];
+	ret = ops->flow_template_table_resize_complete(dev, table, error);
+	ret = flow_err(port_id, ret, error);
+	rte_flow_trace_table_resize_complete(port_id, table, ret);
+	return ret;
+}
@@ -5746,6 +5746,10 @@ struct rte_flow_template_table;
* if the hint is supported.
*/
#define RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG RTE_BIT32(1)
+/**
+ * Specialize table for resize.
+ */
+#define RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE_TABLE RTE_BIT32(2)
/**@}*/
/**
@@ -5824,6 +5828,28 @@ struct rte_flow_template_table_attr {
enum rte_flow_table_hash_func hash_func;
};
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Query if a table can be resized
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param tbl_attr
+ * Template table
+ *
+ * @return
+ * True if the table can be resized.
+ */
+static __rte_always_inline bool
+rte_flow_table_resizable(__rte_unused uint16_t port_id,
+ const struct rte_flow_template_table_attr *tbl_attr)
+{
+ return (tbl_attr->specialize &
+ RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE_TABLE) != 0;
+}
+
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
@@ -6750,6 +6776,94 @@ rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table
const struct rte_flow_item pattern[], uint8_t pattern_template_index,
uint32_t *hash, struct rte_flow_error *error);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Change template table flow rules capacity.
+ * PMD implementation must support table change to the new size.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param table
+ * Template table to modify.
+ * @param nb_rules
+ * New flow rules capacity.
+ * @param error
+ * Perform verbose error reporting if not NULL.
+ * PMDs initialize this structure in case of error only.
+ *
+ * @return
+ * - (0) if success.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOTSUP) if underlying device does not support this functionality.
+ * - (-EINVAL) if *table* cannot be resized or resize to *nb_rules*
+ * is not supported in PMD.
+ */
+__rte_experimental
+int
+rte_flow_template_table_resize(uint16_t port_id,
+ struct rte_flow_template_table *table,
+ uint32_t nb_rules,
+ struct rte_flow_error *error);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Following table resize, update flow resources in port.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param queue
+ * Flow queue for async operation.
+ * @param attr
+ * Async operation attributes.
+ * @param rule
+ * Flow rule to update.
+ * @param user_data
+ * The user data that will be returned on async completion event.
+ * @param error
+ * Perform verbose error reporting if not NULL.
+ * PMDs initialize this structure in case of error only.
+ *
+ * @return
+ * - (0) if success.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOTSUP) if underlying device does not support this functionality.
+ * - (-EINVAL) if *rule* cannot be updated.
+ */
+__rte_experimental
+int
+rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow *rule, void *user_data,
+ struct rte_flow_error *error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Following table resize, notify port that all table flows were updated.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param table
+ * Template table that undergoing resize operation.
+ * @param error
+ * Perform verbose error reporting if not NULL.
+ * PMDs initialize this structure in case of error only.
+ *
+ * @return
+ * - (0) if success.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-ENOTSUP) if underlying device does not support this functionality.
+ * - (-EINVAL) PMD cannot complete table resize.
+ */
+__rte_experimental
+int
+rte_flow_template_table_resize_complete(uint16_t port_id,
+ struct rte_flow_template_table *table,
+ struct rte_flow_error *error);
#ifdef __cplusplus
}
#endif
@@ -370,6 +370,21 @@ struct rte_flow_ops {
(struct rte_eth_dev *dev, const struct rte_flow_template_table *table,
const struct rte_flow_item pattern[], uint8_t pattern_template_index,
uint32_t *hash, struct rte_flow_error *error);
+ /** @see rte_flow_template_table_resize() */
+ int (*flow_template_table_resize)(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *table,
+ uint32_t nb_rules,
+ struct rte_flow_error *error);
+ /** @see rte_flow_async_update_resized() */
+ int (*flow_update_resized)(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow *rule, void *user_data,
+ struct rte_flow_error *error);
+ /** @see rte_flow_template_table_resize_complete() */
+ int (*flow_template_table_resize_complete)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_template_table *table,
+ struct rte_flow_error *error);
};
/**
@@ -316,6 +316,11 @@ EXPERIMENTAL {
rte_eth_recycle_rx_queue_info_get;
rte_flow_group_set_miss_actions;
rte_flow_calc_table_hash;
+
+ # added in 24.03
+	rte_flow_async_update_resized;
+	rte_flow_template_table_resize;
+	rte_flow_template_table_resize_complete;
};
INTERNAL {