@@ -10,6 +10,7 @@
#include "ethdev_driver.h"
#include "ethdev_private.h"
+#include "rte_flow_driver.h"
/**
* A set of values to describe the possible states of a switch domain.
@@ -245,6 +246,8 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);
+ rte_flow_fp_ops_reset(eth_dev->data->port_id);
+
rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
eth_dev->state = RTE_ETH_DEV_UNUSED;
@@ -26,6 +26,8 @@ int32_t rte_flow_dynf_metadata_offs = -1;
/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;
+struct rte_flow_fp_ops rte_flow_fp_ops[RTE_MAX_ETHPORTS];
+
/**
* Flow elements description tables.
*/
@@ -2000,248 +2002,6 @@ rte_flow_group_set_miss_actions(uint16_t port_id,
NULL, rte_strerror(ENOTSUP));
}
-struct rte_flow *
-rte_flow_async_create(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_template_table *template_table,
- const struct rte_flow_item pattern[],
- uint8_t pattern_template_index,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- struct rte_flow *flow;
-
- flow = ops->async_create(dev, queue_id,
- op_attr, template_table,
- pattern, pattern_template_index,
- actions, actions_template_index,
- user_data, error);
- if (flow == NULL)
- flow_err(port_id, -rte_errno, error);
-
- rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
- pattern, pattern_template_index, actions,
- actions_template_index, user_data, flow);
-
- return flow;
-}
-
-struct rte_flow *
-rte_flow_async_create_by_index(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_template_table *template_table,
- uint32_t rule_index,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- struct rte_flow *flow;
-
- flow = ops->async_create_by_index(dev, queue_id,
- op_attr, template_table, rule_index,
- actions, actions_template_index,
- user_data, error);
- if (flow == NULL)
- flow_err(port_id, -rte_errno, error);
- return flow;
-}
-
-int
-rte_flow_async_destroy(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow *flow,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- ret = flow_err(port_id,
- ops->async_destroy(dev, queue_id,
- op_attr, flow,
- user_data, error),
- error);
-
- rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
- user_data, ret);
-
- return ret;
-}
-
-int
-rte_flow_async_actions_update(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow *flow,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- ret = flow_err(port_id,
- ops->async_actions_update(dev, queue_id, op_attr,
- flow, actions,
- actions_template_index,
- user_data, error),
- error);
-
- rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
- actions, actions_template_index,
- user_data, ret);
-
- return ret;
-}
-
-int
-rte_flow_push(uint16_t port_id,
- uint32_t queue_id,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- ret = flow_err(port_id,
- ops->push(dev, queue_id, error),
- error);
-
- rte_flow_trace_push(port_id, queue_id, ret);
-
- return ret;
-}
-
-int
-rte_flow_pull(uint16_t port_id,
- uint32_t queue_id,
- struct rte_flow_op_result res[],
- uint16_t n_res,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
- int rc;
-
- ret = ops->pull(dev, queue_id, res, n_res, error);
- rc = ret ? ret : flow_err(port_id, ret, error);
-
- rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
-
- return rc;
-}
-
-struct rte_flow_action_handle *
-rte_flow_async_action_handle_create(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- const struct rte_flow_indir_action_conf *indir_action_conf,
- const struct rte_flow_action *action,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- struct rte_flow_action_handle *handle;
-
- handle = ops->async_action_handle_create(dev, queue_id, op_attr,
- indir_action_conf, action, user_data, error);
- if (handle == NULL)
- flow_err(port_id, -rte_errno, error);
-
- rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
- indir_action_conf, action,
- user_data, handle);
-
- return handle;
-}
-
-int
-rte_flow_async_action_handle_destroy(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_handle *action_handle,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
- action_handle, user_data, error);
- ret = flow_err(port_id, ret, error);
-
- rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
- action_handle, user_data, ret);
-
- return ret;
-}
-
-int
-rte_flow_async_action_handle_update(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_handle *action_handle,
- const void *update,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- ret = ops->async_action_handle_update(dev, queue_id, op_attr,
- action_handle, update, user_data, error);
- ret = flow_err(port_id, ret, error);
-
- rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
- action_handle, update,
- user_data, ret);
-
- return ret;
-}
-
-int
-rte_flow_async_action_handle_query(uint16_t port_id,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- const struct rte_flow_action_handle *action_handle,
- void *data,
- void *user_data,
- struct rte_flow_error *error)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
- int ret;
-
- if (unlikely(!ops))
- return -rte_errno;
- ret = ops->async_action_handle_query(dev, queue_id, op_attr,
- action_handle, data, user_data, error);
- ret = flow_err(port_id, ret, error);
-
- rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
- action_handle, data, user_data,
- ret);
-
- return ret;
-}
-
int
rte_flow_action_handle_query_update(uint16_t port_id,
struct rte_flow_action_handle *handle,
@@ -2267,35 +2027,6 @@ rte_flow_action_handle_query_update(uint16_t port_id,
return flow_err(port_id, ret, error);
}
-int
-rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
- const struct rte_flow_op_attr *attr,
- struct rte_flow_action_handle *handle,
- const void *update, void *query,
- enum rte_flow_query_update_mode mode,
- void *user_data,
- struct rte_flow_error *error)
-{
- int ret;
- struct rte_eth_dev *dev;
- const struct rte_flow_ops *ops;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (!handle)
- return -EINVAL;
- if (!update && !query)
- return -EINVAL;
- dev = &rte_eth_devices[port_id];
- ops = rte_flow_ops_get(port_id, error);
- if (!ops || !ops->async_action_handle_query_update)
- return -ENOTSUP;
- ret = ops->async_action_handle_query_update(dev, queue_id, attr,
- handle, update,
- query, mode,
- user_data, error);
- return flow_err(port_id, ret, error);
-}
-
struct rte_flow_action_list_handle *
rte_flow_action_list_handle_create(uint16_t port_id,
const
@@ -2345,64 +2076,6 @@ rte_flow_action_list_handle_destroy(uint16_t port_id,
return ret;
}
-struct rte_flow_action_list_handle *
-rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
- const struct rte_flow_op_attr *attr,
- const struct rte_flow_indir_action_conf *conf,
- const struct rte_flow_action *actions,
- void *user_data,
- struct rte_flow_error *error)
-{
- int ret;
- struct rte_eth_dev *dev;
- const struct rte_flow_ops *ops;
- struct rte_flow_action_list_handle *handle;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
- ops = rte_flow_ops_get(port_id, error);
- if (!ops || !ops->async_action_list_handle_create) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "action_list handle not supported");
- return NULL;
- }
- dev = &rte_eth_devices[port_id];
- handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
- actions, user_data,
- error);
- ret = flow_err(port_id, -rte_errno, error);
- rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
- conf, actions, user_data,
- ret);
- return handle;
-}
-
-int
-rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_list_handle *handle,
- void *user_data, struct rte_flow_error *error)
-{
- int ret;
- struct rte_eth_dev *dev;
- const struct rte_flow_ops *ops;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- ops = rte_flow_ops_get(port_id, error);
- if (!ops || !ops->async_action_list_handle_destroy)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "async action_list handle not supported");
- dev = &rte_eth_devices[port_id];
- ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
- handle, user_data, error);
- ret = flow_err(port_id, ret, error);
- rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
- op_attr, handle,
- user_data, ret);
- return ret;
-}
-
int
rte_flow_action_list_handle_query_update(uint16_t port_id,
const struct rte_flow_action_list_handle *handle,
@@ -2429,38 +2102,6 @@ rte_flow_action_list_handle_query_update(uint16_t port_id,
return ret;
}
-int
-rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
- const struct rte_flow_op_attr *attr,
- const struct rte_flow_action_list_handle *handle,
- const void **update, void **query,
- enum rte_flow_query_update_mode mode,
- void *user_data, struct rte_flow_error *error)
-{
- int ret;
- struct rte_eth_dev *dev;
- const struct rte_flow_ops *ops;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- ops = rte_flow_ops_get(port_id, error);
- if (!ops || !ops->async_action_list_handle_query_update)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "action_list async query_update not supported");
- dev = &rte_eth_devices[port_id];
- ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
- handle, update, query,
- mode, user_data,
- error);
- ret = flow_err(port_id, ret, error);
- rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
- attr, handle,
- update, query,
- mode, user_data,
- ret);
- return ret;
-}
-
int
rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
const struct rte_flow_item pattern[], uint8_t pattern_template_index,
@@ -2481,3 +2122,277 @@ rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table
hash, error);
return flow_err(port_id, ret, error);
}
+
+static struct rte_flow *
+dummy_async_create(void *ctx __rte_unused,
+	uint32_t queue __rte_unused,
+	const struct rte_flow_op_attr *attr __rte_unused,
+	struct rte_flow_template_table *table __rte_unused,
+	const struct rte_flow_item items[] __rte_unused,
+	uint8_t pattern_template_index __rte_unused,
+	const struct rte_flow_action actions[] __rte_unused,
+	uint8_t action_template_index __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+	return NULL;
+}
+
+static struct rte_flow *
+dummy_async_create_by_index(void *ctx __rte_unused,
+	uint32_t queue __rte_unused,
+	const struct rte_flow_op_attr *attr __rte_unused,
+	struct rte_flow_template_table *table __rte_unused,
+	uint32_t rule_index __rte_unused,
+	const struct rte_flow_action actions[] __rte_unused,
+	uint8_t action_template_index __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+	return NULL;
+}
+
+static int
+dummy_async_actions_update(void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	struct rte_flow *flow __rte_unused,
+	const struct rte_flow_action actions[] __rte_unused,
+	uint8_t actions_template_index __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_async_destroy(void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	struct rte_flow *flow __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_push(void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_pull(void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	struct rte_flow_op_result res[] __rte_unused,
+	uint16_t n_res __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static struct rte_flow_action_handle *
+dummy_async_action_handle_create(
+	void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
+	const struct rte_flow_action *action __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+	return NULL;
+}
+
+static int
+dummy_async_action_handle_destroy(
+	void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	struct rte_flow_action_handle *action_handle __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_async_action_handle_update(
+	void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	struct rte_flow_action_handle *action_handle __rte_unused,
+	const void *update __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_async_action_handle_query(
+	void *ctx __rte_unused,
+	uint32_t queue_id __rte_unused,
+	const struct rte_flow_op_attr *op_attr __rte_unused,
+	const struct rte_flow_action_handle *action_handle __rte_unused,
+	void *data __rte_unused,
+	void *user_data __rte_unused,
+	struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+		rte_strerror(ENOSYS));
+}
+
+static int
+dummy_async_action_handle_query_update(
+ struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue_id __rte_unused,
+ const struct rte_flow_op_attr *attr __rte_unused,
+ struct rte_flow_action_handle *handle __rte_unused,
+ const void *update __rte_unused,
+ void *query __rte_unused,
+ enum rte_flow_query_update_mode mode __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+}
+
+static struct rte_flow_action_list_handle *
+dummy_async_action_list_handle_create(
+ struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue_id __rte_unused,
+ const struct rte_flow_op_attr *attr __rte_unused,
+ const struct rte_flow_indir_action_conf *conf __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+}
+
+static int
+dummy_async_action_list_handle_destroy(
+ struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue_id __rte_unused,
+ const struct rte_flow_op_attr *op_attr __rte_unused,
+ struct rte_flow_action_list_handle *handle __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+}
+
+static int
+dummy_async_action_list_handle_query_update(
+ struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue_id __rte_unused,
+ const struct rte_flow_op_attr *attr __rte_unused,
+ const struct rte_flow_action_list_handle *handle __rte_unused,
+ const void **update __rte_unused,
+ void **query __rte_unused,
+ enum rte_flow_query_update_mode mode __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+}
+
+int
+rte_flow_fp_ops_register(uint16_t port_id, const struct rte_flow_fp_ops *ops)
+{
+	struct rte_flow_fp_ops *port_ops;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+	port_ops = &rte_flow_fp_ops[port_id];
+
+	/* A driver context is mandatory; without it registration is invalid. */
+	if (ops == NULL || ops->ctx == NULL) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	port_ops->ctx = ops->ctx;
+
+	if (ops->async_create)
+		port_ops->async_create = ops->async_create;
+	if (ops->async_create_by_index)
+		port_ops->async_create_by_index = ops->async_create_by_index;
+	if (ops->async_actions_update)
+		port_ops->async_actions_update = ops->async_actions_update;
+	if (ops->async_destroy)
+		port_ops->async_destroy = ops->async_destroy;
+	if (ops->push)
+		port_ops->push = ops->push;
+	if (ops->pull)
+		port_ops->pull = ops->pull;
+	if (ops->async_action_handle_create)
+		port_ops->async_action_handle_create = ops->async_action_handle_create;
+	if (ops->async_action_handle_destroy)
+		port_ops->async_action_handle_destroy = ops->async_action_handle_destroy;
+	if (ops->async_action_handle_update)
+		port_ops->async_action_handle_update = ops->async_action_handle_update;
+	if (ops->async_action_handle_query)
+		port_ops->async_action_handle_query = ops->async_action_handle_query;
+	if (ops->async_action_handle_query_update)
+		port_ops->async_action_handle_query_update = ops->async_action_handle_query_update;
+	if (ops->async_action_list_handle_create)
+		port_ops->async_action_list_handle_create = ops->async_action_list_handle_create;
+	if (ops->async_action_list_handle_destroy)
+		port_ops->async_action_list_handle_destroy = ops->async_action_list_handle_destroy;
+	if (ops->async_action_list_handle_query_update)
+		port_ops->async_action_list_handle_query_update =
+			ops->async_action_list_handle_query_update;
+
+	return 0;
+}
+
+void
+rte_flow_fp_ops_reset(uint16_t port_id)
+{
+ struct rte_flow_fp_ops *ops = &rte_flow_fp_ops[port_id];
+
+ ops->ctx = NULL;
+ ops->async_create = dummy_async_create;
+ ops->async_create_by_index = dummy_async_create_by_index;
+ ops->async_actions_update = dummy_async_actions_update;
+ ops->async_destroy = dummy_async_destroy;
+ ops->push = dummy_push;
+ ops->pull = dummy_pull;
+ ops->async_action_handle_create = dummy_async_action_handle_create;
+ ops->async_action_handle_destroy = dummy_async_action_handle_destroy;
+ ops->async_action_handle_update = dummy_async_action_handle_update;
+ ops->async_action_handle_query = dummy_async_action_handle_query;
+ ops->async_action_handle_query_update = dummy_async_action_handle_query_update;
+ ops->async_action_list_handle_create = dummy_async_action_list_handle_create;
+ ops->async_action_list_handle_destroy = dummy_async_action_list_handle_destroy;
+ ops->async_action_list_handle_query_update =
+ dummy_async_action_list_handle_query_update;
+}
+
+RTE_INIT(rte_flow_init_dummy_fp_ops) {
+ unsigned int port_id;
+
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
+ rte_flow_fp_ops_reset(port_id);
+}
@@ -5921,6 +5921,195 @@ rte_flow_group_set_miss_actions(uint16_t port_id,
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+/* Forward declaration for callback typedef definition. */
+struct rte_flow_op_attr;
+
+/** @internal Enqueue rule creation operation. */
+typedef struct rte_flow *(*rte_flow_async_create_t)(void *ctx,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ const struct rte_flow_item *items,
+ uint8_t pattern_template_index,
+ const struct rte_flow_action *actions,
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue rule creation by index operation. */
+typedef struct rte_flow *(*rte_flow_async_create_by_index_t)(void *ctx,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_action *actions,
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue rule update operation. */
+typedef int (*rte_flow_async_actions_update_t)(void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue rule destruction operation. */
+typedef int (*rte_flow_async_destroy_t)(void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow *flow,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Push all internally stored rules to the HW. */
+typedef int (*rte_flow_push_t)(void *ctx,
+ uint32_t queue_id,
+ struct rte_flow_error *error);
+
+/* Forward declaration for callback typedef definition. */
+struct rte_flow_op_result;
+
+/** @internal Pull the flow rule operations results from the HW. */
+typedef int (*rte_flow_pull_t)(void *ctx,
+ uint32_t queue_id,
+ struct rte_flow_op_result *res,
+ uint16_t n_res,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action creation operation. */
+typedef struct rte_flow_action_handle *(*rte_flow_async_action_handle_create_t)(
+ void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ const struct rte_flow_indir_action_conf *indir_action_conf,
+ const struct rte_flow_action *action,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action destruction operation. */
+typedef int (*rte_flow_async_action_handle_destroy_t)(void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_handle *action_handle,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action update operation. */
+typedef int (*rte_flow_async_action_handle_update_t)(void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_handle *action_handle,
+ const void *update,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action query operation. */
+typedef int (*rte_flow_async_action_handle_query_t)
+ (void *ctx,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ const struct rte_flow_action_handle *action_handle,
+ void *data,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Query and update operational mode.
+ *
+ * @see rte_flow_action_handle_query_update()
+ * @see rte_flow_async_action_handle_query_update()
+ */
+enum rte_flow_query_update_mode {
+ /* Default query_update operational mode.
+ * If both `update` and `query` parameters are not NULL
+ * the call updates and queries action in default port order.
+ * If `update` parameter is NULL the call queries action.
+ * If `query` parameter is NULL the call updates action.
+ */
+ RTE_FLOW_QU_DEFAULT,
+ /* Force port to query action before update. */
+ RTE_FLOW_QU_QUERY_FIRST,
+ /* Force port to update action before update. */
+ RTE_FLOW_QU_UPDATE_FIRST,
+};
+
+/** @internal Enqueue indirect action query and/or update operation. */
+typedef int (*rte_flow_async_action_handle_query_update_t)(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_handle *handle,
+ const void *update, void *query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/* Forward declaration. */
+struct rte_flow_action_list_handle;
+
+/** @internal Enqueue indirect action list creation operation. */
+typedef struct rte_flow_action_list_handle *(*rte_flow_async_action_list_handle_create_t)(
+ struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action list destruction operation. */
+typedef int (*rte_flow_async_action_list_handle_destroy_t)(
+ struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/** @internal Enqueue indirect action list query and/or update operation. */
+typedef int (*rte_flow_async_action_list_handle_query_update_t)(
+ struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
+
+/**
+ * @internal
+ *
+ * Fast path async flow functions and related data are held in a flat array, one entry per ethdev.
+ * It is assumed that each entry is read-only and cache aligned.
+ */
+struct rte_flow_fp_ops {
+ void *ctx;
+ rte_flow_async_create_t async_create;
+ rte_flow_async_create_by_index_t async_create_by_index;
+ rte_flow_async_actions_update_t async_actions_update;
+ rte_flow_async_destroy_t async_destroy;
+ rte_flow_push_t push;
+ rte_flow_pull_t pull;
+ rte_flow_async_action_handle_create_t async_action_handle_create;
+ rte_flow_async_action_handle_destroy_t async_action_handle_destroy;
+ rte_flow_async_action_handle_update_t async_action_handle_update;
+ rte_flow_async_action_handle_query_t async_action_handle_query;
+ rte_flow_async_action_handle_query_update_t async_action_handle_query_update;
+ rte_flow_async_action_list_handle_create_t async_action_list_handle_create;
+ rte_flow_async_action_list_handle_destroy_t async_action_list_handle_destroy;
+ rte_flow_async_action_list_handle_query_update_t async_action_list_handle_query_update;
+} __rte_cache_aligned;
+
+extern struct rte_flow_fp_ops rte_flow_fp_ops[RTE_MAX_ETHPORTS];
+
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
@@ -5973,7 +6162,7 @@ struct rte_flow_op_attr {
* Only completion result indicates that if there was success or failure.
*/
__rte_experimental
-struct rte_flow *
+static inline struct rte_flow *
rte_flow_async_create(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
@@ -5983,7 +6172,31 @@ rte_flow_async_create(uint16_t port_id,
const struct rte_flow_action actions[],
uint8_t actions_template_index,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ struct rte_flow *flow;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ flow = ops->async_create(ops->ctx, queue_id,
+ op_attr, template_table,
+ pattern, pattern_template_index,
+ actions, actions_template_index,
+ user_data, error);
+
+ rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
+ pattern, pattern_template_index, actions,
+ actions_template_index, user_data, flow);
+ return flow;
+}
/**
* @warning
@@ -6018,7 +6231,7 @@ rte_flow_async_create(uint16_t port_id,
* Only completion result indicates that if there was success or failure.
*/
__rte_experimental
-struct rte_flow *
+static inline struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
@@ -6027,7 +6240,31 @@ rte_flow_async_create_by_index(uint16_t port_id,
const struct rte_flow_action actions[],
uint8_t actions_template_index,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ struct rte_flow *flow;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ flow = ops->async_create_by_index(ops->ctx, queue_id,
+ op_attr, template_table,
+ rule_index,
+ actions, actions_template_index,
+ user_data, error);
+
+ rte_flow_trace_async_create_by_index(port_id, queue_id, op_attr, template_table, rule_index,
+ actions, actions_template_index, user_data, flow);
+
+ return flow;
+}
/**
* @warning
@@ -6059,13 +6296,33 @@ rte_flow_async_create_by_index(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_destroy(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
struct rte_flow *flow,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_destroy(ops->ctx, queue_id,
+ op_attr, flow,
+ user_data, error);
+
+ rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
+ user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6096,7 +6353,7 @@ rte_flow_async_destroy(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_actions_update(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
@@ -6104,7 +6361,29 @@ rte_flow_async_actions_update(uint16_t port_id,
const struct rte_flow_action actions[],
uint8_t actions_template_index,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_actions_update(ops->ctx, queue_id,
+ op_attr, flow,
+ actions, actions_template_index,
+ user_data, error);
+
+ rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
+ actions, actions_template_index,
+ user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6127,10 +6406,27 @@ rte_flow_async_actions_update(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_push(uint16_t port_id,
uint32_t queue_id,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->push(ops->ctx, queue_id, error);
+
+ rte_flow_trace_push(port_id, queue_id, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6193,12 +6489,29 @@ struct rte_flow_op_result {
* a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_pull(uint16_t port_id,
uint32_t queue_id,
struct rte_flow_op_result res[],
uint16_t n_res,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->pull(ops->ctx, queue_id, res, n_res, error);
+
+ rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6227,14 +6540,37 @@ rte_flow_pull(uint16_t port_id,
* A valid handle in case of success, NULL otherwise and rte_errno is set.
*/
__rte_experimental
-struct rte_flow_action_handle *
+static inline struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
const struct rte_flow_indir_action_conf *indir_action_conf,
const struct rte_flow_action *action,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ struct rte_flow_action_handle *handle;
+
+#ifdef RTE_FLOW_DEBUG
+	if (port_id >= RTE_MAX_ETHPORTS) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ handle = ops->async_action_handle_create(ops->ctx, queue_id,
+ op_attr, indir_action_conf, action,
+ user_data, error);
+
+ rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
+ indir_action_conf, action,
+ user_data, handle);
+
+ return handle;
+}
/**
* @warning
@@ -6262,13 +6598,32 @@ rte_flow_async_action_handle_create(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_handle_destroy(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_handle *action_handle,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_handle_destroy(ops->ctx, queue_id, op_attr, action_handle,
+ user_data, error);
+
+ rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
+ action_handle, user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6301,14 +6656,34 @@ rte_flow_async_action_handle_destroy(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_handle_update(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_handle *action_handle,
const void *update,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_handle_update(ops->ctx, queue_id, op_attr, action_handle, update,
+ user_data, error);
+
+ rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
+ action_handle, update,
+ user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6345,28 +6720,34 @@ rte_flow_async_action_handle_update(uint16_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_handle_query(uint16_t port_id,
uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
const struct rte_flow_action_handle *action_handle,
void *data,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Query and update operational mode.
- *
- * @see rte_flow_action_handle_query_update()
- * @see rte_flow_async_action_handle_query_update()
- */
-enum rte_flow_query_update_mode {
- RTE_FLOW_QU_QUERY_FIRST = 1, /**< Query before update. */
- RTE_FLOW_QU_UPDATE_FIRST, /**< Query after update. */
-};
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_handle_query(ops->ctx, queue_id, op_attr, action_handle, data,
+ user_data, error);
+
+ rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
+ action_handle, data, user_data,
+ ret);
+
+ return ret;
+}
/**
* @warning
@@ -6445,16 +6826,35 @@ rte_flow_action_handle_query_update(uint16_t port_id,
* both *update* and *query* are NULL.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
const struct rte_flow_op_attr *attr,
struct rte_flow_action_handle *handle,
const void *update, void *query,
enum rte_flow_query_update_mode mode,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
-struct rte_flow_action_list_handle;
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_handle_query_update(ops->ctx, queue_id, attr, handle,
+ update, query, mode,
+ user_data, error);
+
+ rte_flow_trace_async_action_handle_query_update(port_id, queue_id, attr,
+ handle, update, query, mode,
+ user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6548,13 +6948,36 @@ rte_flow_action_list_handle_create(uint16_t port_id,
* - (-ENOTSUP) if *action* list element valid but unsupported.
*/
__rte_experimental
-struct rte_flow_action_list_handle *
+static inline struct rte_flow_action_list_handle *
rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
const struct rte_flow_op_attr *attr,
const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *actions,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ struct rte_flow_action_list_handle *handle;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ handle = ops->async_action_list_handle_create(ops->ctx, queue_id,
+ attr, conf, actions,
+ user_data, error);
+
+ rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
+ conf, actions, user_data,
+ handle);
+
+ return handle;
+}
/**
* @warning
@@ -6614,12 +7037,32 @@ rte_flow_action_list_handle_destroy(uint16_t port_id,
* - (-EBUSY) if actions list pointed by *action* handle still used
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_list_handle_destroy
(uint16_t port_id, uint32_t queue_id,
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *handle,
- void *user_data, struct rte_flow_error *error);
+ void *user_data, struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_list_handle_destroy(ops->ctx, queue_id, op_attr, handle,
+ user_data, error);
+
+ rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
+ op_attr, handle,
+ user_data, ret);
+
+ return ret;
+}
/**
* @warning
@@ -6709,14 +7152,37 @@ rte_flow_action_list_handle_query_update(uint16_t port_id,
* both *update* and *query* are NULL.
*/
__rte_experimental
-int
+static inline int
rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
const struct rte_flow_op_attr *attr,
const struct rte_flow_action_list_handle *handle,
const void **update, void **query,
enum rte_flow_query_update_mode mode,
void *user_data,
- struct rte_flow_error *error);
+ struct rte_flow_error *error)
+{
+ struct rte_flow_fp_ops *ops;
+ int ret;
+
+#ifdef RTE_FLOW_DEBUG
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+#endif
+
+ ops = &rte_flow_fp_ops[port_id];
+ ret = ops->async_action_list_handle_query_update(ops->ctx, queue_id, attr, handle,
+ update, query, mode,
+ user_data, error);
+
+ rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
+ attr, handle,
+ update, query,
+ mode, user_data,
+ ret);
+
+ return ret;
+}
/**
* @warning
@@ -234,122 +234,12 @@ struct rte_flow_ops {
const struct rte_flow_group_attr *attr,
const struct rte_flow_action actions[],
struct rte_flow_error *err);
- /** See rte_flow_async_create() */
- struct rte_flow *(*async_create)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_template_table *template_table,
- const struct rte_flow_item pattern[],
- uint8_t pattern_template_index,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *err);
- /** See rte_flow_async_create_by_index() */
- struct rte_flow *(*async_create_by_index)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_template_table *template_table,
- uint32_t rule_index,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *err);
- /** See rte_flow_async_destroy() */
- int (*async_destroy)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow *flow,
- void *user_data,
- struct rte_flow_error *err);
- /** See rte_flow_push() */
- int (*push)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- struct rte_flow_error *err);
- /** See rte_flow_pull() */
- int (*pull)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- struct rte_flow_op_result res[],
- uint16_t n_res,
- struct rte_flow_error *error);
- /** See rte_flow_async_action_handle_create() */
- struct rte_flow_action_handle *(*async_action_handle_create)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- const struct rte_flow_indir_action_conf *indir_action_conf,
- const struct rte_flow_action *action,
- void *user_data,
- struct rte_flow_error *err);
- /** See rte_flow_async_action_handle_destroy() */
- int (*async_action_handle_destroy)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_handle *action_handle,
- void *user_data,
- struct rte_flow_error *error);
- /** See rte_flow_async_action_handle_update() */
- int (*async_action_handle_update)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_handle *action_handle,
- const void *update,
- void *user_data,
- struct rte_flow_error *error);
- /** See rte_flow_async_action_handle_query() */
- int (*async_action_handle_query)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- const struct rte_flow_action_handle *action_handle,
- void *data,
- void *user_data,
- struct rte_flow_error *error);
- /** See rte_flow_async_action_handle_query_update */
- int (*async_action_handle_query_update)
- (struct rte_eth_dev *dev, uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_handle *action_handle,
- const void *update, void *query,
- enum rte_flow_query_update_mode qu_mode,
- void *user_data, struct rte_flow_error *error);
/** See rte_flow_actions_update(). */
int (*actions_update)
(struct rte_eth_dev *dev,
struct rte_flow *flow,
const struct rte_flow_action actions[],
struct rte_flow_error *error);
- /** See rte_flow_async_actions_update() */
- int (*async_actions_update)
- (struct rte_eth_dev *dev,
- uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow *flow,
- const struct rte_flow_action actions[],
- uint8_t actions_template_index,
- void *user_data,
- struct rte_flow_error *error);
- /** @see rte_flow_async_action_list_handle_create() */
- struct rte_flow_action_list_handle *
- (*async_action_list_handle_create)
- (struct rte_eth_dev *dev, uint32_t queue_id,
- const struct rte_flow_op_attr *attr,
- const struct rte_flow_indir_action_conf *conf,
- const struct rte_flow_action *actions,
- void *user_data, struct rte_flow_error *error);
- /** @see rte_flow_async_action_list_handle_destroy() */
- int (*async_action_list_handle_destroy)
- (struct rte_eth_dev *dev, uint32_t queue_id,
- const struct rte_flow_op_attr *op_attr,
- struct rte_flow_action_list_handle *action_handle,
- void *user_data, struct rte_flow_error *error);
/** @see rte_flow_action_list_handle_query_update() */
int (*action_list_handle_query_update)
(struct rte_eth_dev *dev,
@@ -357,14 +247,6 @@ struct rte_flow_ops {
const void **update, void **query,
enum rte_flow_query_update_mode mode,
struct rte_flow_error *error);
- /** @see rte_flow_async_action_list_handle_query_update() */
- int (*async_action_list_handle_query_update)
- (struct rte_eth_dev *dev, uint32_t queue_id,
- const struct rte_flow_op_attr *attr,
- const struct rte_flow_action_list_handle *handle,
- const void **update, void **query,
- enum rte_flow_query_update_mode mode,
- void *user_data, struct rte_flow_error *error);
/** @see rte_flow_calc_table_hash() */
int (*flow_calc_table_hash)
(struct rte_eth_dev *dev, const struct rte_flow_template_table *table,
@@ -394,6 +276,32 @@ rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error);
int
rte_flow_restore_info_dynflag_register(void);
+/**
+ * Register fast path flow operations for a given port.
+ *
+ * Whenever a new device is probed the PMD is responsible for calling this function
+ * to provide relevant fast path flow operations implementation.
+ *
+ * @param port_id
+ * Port identifier.
+ * @param ops
+ * Pointer to fast path flow operations container.
+ *
+ * @return
+ * 0 on success. Negative errno otherwise
+ */
+int
+rte_flow_fp_ops_register(uint16_t port_id, const struct rte_flow_fp_ops *ops);
+
+/**
+ * Provides dummy callbacks for fast path flow API functions.
+ *
+ * @param port_id
+ * Port identifier.
+ */
+void
+rte_flow_fp_ops_reset(uint16_t port_id);
+
#ifdef __cplusplus
}
#endif