@@ -55,6 +55,10 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Intel iavf driver.**
+
+ * Added flow subscription support.
+
Removed Items
-------------
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
struct iavf_fdir_conf conf;
};
+/* One flow-subscription rule: the virtchnl request/release messages plus
+ * bookkeeping for the rule's lifetime.
+ */
+struct iavf_fsub_conf {
+	/* virtchnl SUBSCRIBE message (pattern, masks, actions, priority) */
+	struct virtchnl_flow_sub sub_fltr;
+	/* virtchnl UNSUBSCRIBE message; keyed by flow_id on destroy */
+	struct virtchnl_flow_unsub unsub_fltr;
+	/* NOTE(review): never written in the visible code — presumably meant
+	 * to hold the matched IAVF_INSET_* bits; confirm before relying on it.
+	 */
+	uint64_t input_set;
+	/* rule id assigned by the PF in the subscribe response */
+	uint32_t flow_id;
+};
+
struct iavf_qv_map {
uint16_t queue_id;
uint16_t vector_id;
@@ -482,4 +489,7 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
extern const struct rte_tm_ops iavf_tm_ops;
int iavf_get_ptp_cap(struct iavf_adapter *adapter);
int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
#endif /* _IAVF_ETHDEV_H_ */
@@ -1551,6 +1551,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
+ uint32_t priority,
void **meta,
struct rte_flow_error *error)
{
@@ -1561,6 +1562,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
memset(filter, 0, sizeof(*filter));
+ if (priority >= 1)
+ return -rte_errno;
+
item = iavf_search_pattern_match_item(pattern, array, array_len, error);
if (!item)
return -rte_errno;
new file mode 100644
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+#define MAX_QGRP_NUM_TYPE 7
+#define IAVF_IPV6_ADDR_LENGTH 16
+#define MAX_INPUT_SET_BYTE 32
+
+#define IAVF_FLTR_RX BIT(0)
+#define IAVF_FLTR_TX BIT(1)
+#define IAVF_FLTR_TX_RX (IAVF_FLTR_RX | IAVF_FLTR_TX)
+
+#define IAVF_SW_INSET_ETHER ( \
+ IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+ IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+ IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+ IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+ IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+ IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+ IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
+
+/* Forward declaration: the parser is defined at the bottom of this file. */
+static struct iavf_flow_parser iavf_fsub_parser;
+
+/* Patterns accepted by the subscription engine; each entry pairs a pattern
+ * with the input-set bits that callers are allowed to mask on.  The third
+ * field (inner input set) is unused by this engine.
+ */
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,				IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,					IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,				IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,				IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,				IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,					IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,				IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,				IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
+
+/* Engine .create callback: send the parsed rule (@meta, parser-owned) to the
+ * PF over virtchnl and stash a private heap copy in flow->rule so destroy can
+ * later unsubscribe by flow_id.
+ *
+ * Returns 0 on success, -rte_errno (with @error filled in) on failure.
+ */
+static int
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	/* Copy first so the caller remains free to release @meta. */
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	/* iavf_flow_sub() also records the PF-assigned flow_id in @filter. */
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+/* Engine .destroy callback: ask the PF to drop the subscription identified by
+ * the flow_id stored at create time, then free the private rule copy.
+ *
+ * NOTE(review): flow->rule is dereferenced without a NULL check — callers are
+ * presumably guaranteed to pass a flow previously completed by
+ * iavf_fsub_create(); confirm against the generic-flow framework.
+ */
+static int
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	/* On success the rule memory is released; ret is 0 here. */
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
+}
+
+/* Engine .validation callback: run the rule through the PF in validate-only
+ * mode (no hardware programming).  Returns 0 when the PF accepts the rule,
+ * -rte_errno (with @error set) otherwise.
+ */
+static int
+iavf_fsub_validation(struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     void *meta,
+		     struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Translate an rte_flow pattern into the virtchnl proto_hdr_w_msk layout and
+ * accumulate the input-set bits actually masked on.
+ *
+ * Returns 0 on success, -rte_errno (with @error set) on any rejected item.
+ * All error paths previously returned `false` (i.e. 0 == success) from this
+ * int-returning function — fixed to return -rte_errno.
+ */
+static int
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+	     RTE_FLOW_ITEM_TYPE_END; item++) {
+		/* spec/last ranges are not supported by this engine */
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					/* was IAVF_INSET_DMAC: a masked source
+					 * MAC must set the SMAC bit
+					 */
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				/* NOTE(review): 2 bytes counted per IPv4
+				 * address although an address is 4 bytes —
+				 * confirm the MAX_INPUT_SET_BYTE budget
+				 * convention before changing.
+				 */
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				/* count every masked address byte separately */
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			/* was `vlan_spec && vlan_spec`: the mask is
+			 * dereferenced below, so it must be checked too
+			 */
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item,
+							   "Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	/* NOTE(review): presumably the offset beyond VIRTCHNL_MAX_NUM_PROTO_HDRS
+	 * tells the PF that the proto_hdr_w_msk[] layout is in use — confirm
+	 * against the virtchnl definition.
+	 */
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	/* reject empty input sets and bits outside the pattern's budget;
+	 * set the flow error so rte_errno is not returned stale
+	 */
+	if (!outer_input_set || (outer_input_set & ~input_set_mask)) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   pattern, "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Translate the rte_flow action list into virtchnl filter actions.
+ * Exactly one VF action is required; at most one destination action
+ * (QUEUE or RSS queue-region) may accompany it.
+ *
+ * Returns 0 on success, -rte_errno (with @error set) otherwise.
+ */
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
+{
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_vf *act_vf;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		/* Bound-check BEFORE indexing actions[num]; the original only
+		 * checked after the loop, after an out-of-bounds write could
+		 * already have happened.
+		 */
+		if (num >= VIRTCHNL_MAX_NUM_ACTIONS) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+					   "Action numbers exceed the maximum value");
+			return -rte_errno;
+		}
+
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VF:
+			/* The VF action only reserves a slot; no virtchnl
+			 * action type is written for it (the dead
+			 * filter_action assignment was removed).
+			 */
+			vf_num++;
+
+			act_vf = action->conf;
+			if (act_vf->id != ad->hw.device_id &&
+			    !act_vf->original) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions, "Invalid vf id");
+				return -rte_errno;
+			}
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			/* region size must be a power of two in [2, 128] */
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error1;
+
+			/* queues of the region must be contiguous */
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error2;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+error:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
+	return -rte_errno;
+}
+
+/* Pre-validate the action list shape: either a single non-queue action
+ * (VF or DROP) or exactly two actions where one is VF and one is a
+ * destination (QUEUE/RSS).  Detailed per-action checks happen later in
+ * iavf_fsub_parse_action().
+ *
+ * Returns 0 when the combination is acceptable, -rte_errno otherwise.
+ */
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			/* VOID actions are ignored entirely */
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* accept: one non-queue action, or VF + one destination action */
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Parser entry point registered with the generic-flow framework: match the
+ * pattern, fill an iavf_fsub_conf, and hand it back through @meta (caller
+ * takes ownership).  The original leaked @filter on every error path, leaked
+ * it when @meta was NULL, and returned 0 (success) when the allocation
+ * itself failed — all fixed below.
+ *
+ * Returns 0 on success, negative errno (with @error set) on failure.
+ */
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t priority,
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item) {
+		ret = -rte_errno;
+		goto error;
+	}
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action(ad, actions, priority, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta) {
+		*meta = filter;
+		filter = NULL;	/* ownership transferred to the caller */
+	}
+
+error:
+	/* frees the filter unless ownership moved to *meta; rte_free(NULL)
+	 * is a no-op
+	 */
+	rte_free(filter);
+	rte_free(pattern_match_item);
+	return ret;
+}
+
+/* Engine .init callback: register the fsub parser only when the PF
+ * advertised the FSUB capability in the VF resource flags.
+ *
+ * Returns 0 on success, -EINVAL when resources are missing, -ENOTSUP when
+ * the capability is absent.
+ */
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+/* Engine .uninit callback: drop the fsub parser from the dist parser list. */
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+/* Flow-subscription engine callbacks wired into the generic-flow framework. */
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+/* Parser descriptor: binds the pattern table and parse entry point to the
+ * engine; runs at the distributor stage alongside FDIR.
+ */
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+/* Constructor: register the engine with the framework at load time. */
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
struct rte_flow *flow,
struct iavf_parser_list *parser_list,
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
@@ -1866,6 +1867,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
{
struct iavf_parser_list *list = NULL;
struct iavf_flow_parser_node *parser_node;
+ struct iavf_flow_parser_node *existing_node;
+ void *temp;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1883,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
TAILQ_INSERT_TAIL(list, parser_node, node);
} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
list = &vf->dist_parser_list;
+ RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+ if (existing_node->parser->engine->type ==
+ IAVF_FLOW_ENGINE_FSUB) {
+ TAILQ_INSERT_AFTER(list, existing_node,
+ parser_node, node);
+ goto DONE;
+ }
+ }
TAILQ_INSERT_HEAD(list, parser_node, node);
} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
list = &vf->ipsec_crypto_parser_list;
TAILQ_INSERT_HEAD(list, parser_node, node);
+ } else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+ list = &vf->dist_parser_list;
+ TAILQ_INSERT_HEAD(list, parser_node, node);
} else {
return -EINVAL;
}
+DONE:
return 0;
}
@@ -1902,7 +1917,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
list = &vf->rss_parser_list;
- else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+ else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+ (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
list = &vf->dist_parser_list;
if (list == NULL)
@@ -1936,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Not supported */
- if (attr->priority) {
+ /* support priority for flow subscribe */
+ if (attr->priority > 1) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "Not support priority.");
+ attr, "Only support priority 0 and 1.");
return -rte_errno;
}
@@ -2083,6 +2099,7 @@ static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
struct rte_flow *flow,
struct iavf_parser_list *parser_list,
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
@@ -2096,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
if (parser_node->parser->parse_pattern_action(ad,
parser_node->parser->array,
parser_node->parser->array_len,
- pattern, actions, &meta, error) < 0)
+ pattern, actions, priority, &meta, error) < 0)
continue;
engine = parser_node->parser->engine;
@@ -2112,6 +2129,7 @@ static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
struct rte_flow *flow,
struct iavf_parser_list *parser_list,
+ uint32_t priority,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
@@ -2125,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
if (parser_node->parser->parse_pattern_action(ad,
parser_node->parser->array,
parser_node->parser->array_len,
- pattern, actions, &meta, error) < 0)
+ pattern, actions, priority, &meta, error) < 0)
continue;
engine = parser_node->parser->engine;
@@ -2186,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
- *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
- actions, error);
+ *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+ attr->priority, pattern, actions, error);
if (*engine)
return 0;
- *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
- actions, error);
+ *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+ attr->priority, pattern, actions, error);
if (*engine)
return 0;
*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
- pattern, actions, error);
+ attr->priority, pattern, actions, error);
if (*engine)
return 0;
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
+ uint32_t priority,
void **meta,
struct rte_flow_error *error);
@@ -480,6 +481,7 @@ enum iavf_flow_engine_type {
IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
IAVF_FLOW_ENGINE_FDIR,
IAVF_FLOW_ENGINE_HASH,
+ IAVF_FLOW_ENGINE_FSUB,
IAVF_FLOW_ENGINE_MAX,
};
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
+ uint32_t priority,
void **meta,
struct rte_flow_error *error);
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
+ uint32_t priority,
void **meta,
struct rte_flow_error *error)
{
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
uint64_t phint = IAVF_PHINT_NONE;
int ret = 0;
+ if (priority >= 1)
+ return -rte_errno;
+
rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
if (!rss_meta_ptr) {
rte_flow_error_set(error, EINVAL,
@@ -1932,16 +1932,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
static int
iavf_ipsec_flow_parse(struct iavf_adapter *ad,
- struct iavf_pattern_match_item *array,
- uint32_t array_len,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- void **meta,
- struct rte_flow_error *error)
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ uint32_t priority,
+ void **meta,
+ struct rte_flow_error *error)
{
struct iavf_pattern_match_item *item = NULL;
int ret = -1;
+ if (priority >= 1)
+ return -rte_errno;
+
item = iavf_search_pattern_match_item(pattern, array, array_len, error);
if (item && item->meta) {
uint32_t type = (uint64_t)(item->meta);
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_FSUB_PF |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
@@ -1533,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
return 0;
}
+/* Send VIRTCHNL_OP_FLOW_SUBSCRIBE to the PF and record the returned flow_id
+ * in @filter for later unsubscribe.
+ *
+ * Returns 0 on success, the virtchnl error on transport failure, or -1 when
+ * the PF rejected the rule.  The original parsed the response buffer (and
+ * stored a flow_id from it) even when the command itself failed — now it
+ * returns early instead of acting on a stale aq_resp.
+ */
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+			    "OP_FLOW_SUBSCRIBE");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	switch (fsub_cfg->status) {
+	case VIRTCHNL_FSUB_SUCCESS:
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+		break;
+	case VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE:
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+			    "resource");
+		err = -1;
+		break;
+	case VIRTCHNL_FSUB_FAILURE_RULE_EXIST:
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to the rule "
+			    "is already existed");
+		err = -1;
+		break;
+	case VIRTCHNL_FSUB_FAILURE_RULE_INVALID:
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to the hw "
+			    "doesn't support");
+		err = -1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+			    "reasons");
+		err = -1;
+		break;
+	}
+
+	return err;
+}
+
+/* Send VIRTCHNL_OP_FLOW_UNSUBSCRIBE for @filter's recorded flow_id.
+ *
+ * Returns 0 on success, the virtchnl error on transport failure, or -1 when
+ * the PF rejected the request.  As with iavf_flow_sub(), a failed command now
+ * returns early instead of parsing a stale response buffer.
+ */
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+			    "OP_FLOW_UNSUBSCRIBE");
+		return err;
+	}
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	switch (unsub_cfg->status) {
+	case VIRTCHNL_FSUB_SUCCESS:
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+		break;
+	case VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST:
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to this "
+			    "rule doesn't exist");
+		err = -1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+			    "reasons");
+		err = -1;
+		break;
+	}
+
+	return err;
+}
+
+/* Validate a subscription rule against the PF (validate_only = 1, nothing is
+ * programmed).  Two fixes vs. the original: @args was used uninitialized
+ * (no memset, unlike iavf_flow_sub/unsub), and the function returned 0 even
+ * after setting err = -1 on a PF rejection.
+ *
+ * Returns 0 when the PF accepts the rule, negative otherwise.
+ */
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to "
+			    "parameters validation or HW doesn't "
+			    "support");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+			    "reasons");
+		err = -1;
+	}
+
+	/* was `return 0;`, which reported success even after err = -1 */
+	return err;
+}
+
int
iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
struct virtchnl_rss_cfg *rss_cfg, bool add)
@@ -16,6 +16,7 @@ sources = files(
'iavf_hash.c',
'iavf_tm.c',
'iavf_ipsec_crypto.c',
+ 'iavf_fsub.c',
)
if arch_subdir == 'x86'