get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update: all writable fields must be supplied).

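As a rough illustration of the operations above, here is a minimal Python `requests` sketch for this endpoint; the token header and the set of writable fields ("state", "archived") are assumptions drawn from the standard Patchwork REST API rather than from the response shown below.

    import requests

    BASE_URL = "http://patchwork.dpdk.org/api"
    PATCH_ID = 41877

    # Show a patch: fetch the JSON object reproduced below.
    resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], "->", patch["state"])

    # Update a patch: a partial update requires an authenticated maintainer.
    # The "Authorization: Token" header and the writable fields used here are
    # assumptions based on the usual Patchwork REST API, not on this page.
    headers = {"Authorization": "Token <your-api-token>"}
    resp = requests.patch(f"{BASE_URL}/patches/{PATCH_ID}/",
                          json={"state": "accepted", "archived": True},
                          headers=headers)
    resp.raise_for_status()
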
GET /api/patches/41877/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41877,
    "url": "http://patchwork.dpdk.org/api/patches/41877/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20180628201549.3507-12-ajit.khaparde@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180628201549.3507-12-ajit.khaparde@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180628201549.3507-12-ajit.khaparde@broadcom.com",
    "date": "2018-06-28T20:15:37",
    "name": "[v2,11/23] net/bnxt: refactor filter/flow",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "897a9f59297019045bf75246166a7563e454e875",
    "submitter": {
        "id": 501,
        "url": "http://patchwork.dpdk.org/api/people/501/?format=api",
        "name": "Ajit Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20180628201549.3507-12-ajit.khaparde@broadcom.com/mbox/",
    "series": [
        {
            "id": 300,
            "url": "http://patchwork.dpdk.org/api/series/300/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=300",
            "date": "2018-06-28T20:15:29",
            "name": "bnxt patchset",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/300/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/41877/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/41877/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2E7291B585;\n\tThu, 28 Jun 2018 22:16:35 +0200 (CEST)",
            "from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com\n\t[192.19.229.170]) by dpdk.org (Postfix) with ESMTP id A73B61B50F\n\tfor <dev@dpdk.org>; Thu, 28 Jun 2018 22:15:58 +0200 (CEST)",
            "from nis-sj1-27.broadcom.com (nis-sj1-27.lvn.broadcom.net\n\t[10.75.144.136])\n\tby rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id 0E36930C035;\n\tThu, 28 Jun 2018 13:15:56 -0700 (PDT)",
            "from C02VPB22HTD6.dhcp.broadcom.net (c02vpb22htd6.dhcp.broadcom.net\n\t[10.136.50.120])\n\tby nis-sj1-27.broadcom.com (Postfix) with ESMTP id 73A73AC079C;\n\tThu, 28 Jun 2018 13:15:55 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com 0E36930C035",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com;\n\ts=dkimrelay; t=1530216956;\n\tbh=8lIFFSf4GGMVjkviDBtMshFcnJkwm+IJNlGz5pk+udM=;\n\th=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n\tb=NHPBlTtgnVySqiKaNSMyClhojM6BmCRHnds0MaMWjomgrPVV8yTO+8HWxx0RWF0GY\n\t2IC94AZjU70TeX4CriPkuEbzQ3fofDHkMJ7HXQtrjKp5bafGhZE/IUXhmKu88OBC5c\n\tYLV0Jl1i94BYemLIiV8xQnu1ankHz0JVvJvKHgWE=",
        "From": "Ajit Khaparde <ajit.khaparde@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com, Michael Wildt <michael.wildt@broadcom.com>,\n\tScott Branden <scott.branden@broadcom.com>",
        "Date": "Thu, 28 Jun 2018 13:15:37 -0700",
        "Message-Id": "<20180628201549.3507-12-ajit.khaparde@broadcom.com>",
        "X-Mailer": "git-send-email 2.15.2 (Apple Git-101.1)",
        "In-Reply-To": "<20180628201549.3507-1-ajit.khaparde@broadcom.com>",
        "References": "<f02289d7-4111-ab00-9ce9-00dd5c8b3c18@intel.com>\n\t<20180628201549.3507-1-ajit.khaparde@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH v2 11/23] net/bnxt: refactor filter/flow",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In preparation of more rte_flow support it has been decided to\nseparate out filter and flow into their own files. Functionally the\nsame.\n\nSigned-off-by: Michael Wildt <michael.wildt@broadcom.com>\nSigned-off-by: Scott Branden <scott.branden@broadcom.com>\nSigned-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\n--\nv1->v2: Fix commit log.\n---\n drivers/net/bnxt/Makefile      |    1 +\n drivers/net/bnxt/bnxt_filter.c | 1060 ------------------------------------\n drivers/net/bnxt/bnxt_flow.c   | 1167 ++++++++++++++++++++++++++++++++++++++++\n 3 files changed, 1168 insertions(+), 1060 deletions(-)\n create mode 100644 drivers/net/bnxt/bnxt_flow.c",
    "diff": "diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile\nindex 80db03ea8..8be3cb0e4 100644\n--- a/drivers/net/bnxt/Makefile\n+++ b/drivers/net/bnxt/Makefile\n@@ -29,6 +29,7 @@ EXPORT_MAP := rte_pmd_bnxt_version.map\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c\n+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c\ndiff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c\nindex 72989ab67..31757d32c 100644\n--- a/drivers/net/bnxt/bnxt_filter.c\n+++ b/drivers/net/bnxt/bnxt_filter.c\n@@ -180,1063 +180,3 @@ void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)\n {\n \tSTAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);\n }\n-\n-static int\n-bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,\n-\t\t\tconst struct rte_flow_item pattern[],\n-\t\t\tconst struct rte_flow_action actions[],\n-\t\t\tstruct rte_flow_error *error)\n-{\n-\tif (!pattern) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\tRTE_FLOW_ERROR_TYPE_ITEM_NUM,\n-\t\t\tNULL, \"NULL pattern.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\tif (!actions) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM,\n-\t\t\t\t   NULL, \"NULL action.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\tif (!attr) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n-\t\t\t\t   NULL, \"NULL attribute.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static const struct rte_flow_item *\n-nxt_non_void_pattern(const struct rte_flow_item *cur)\n-{\n-\twhile (1) {\n-\t\tif (cur->type != RTE_FLOW_ITEM_TYPE_VOID)\n-\t\t\treturn cur;\n-\t\tcur++;\n-\t}\n-}\n-\n-static const struct rte_flow_action *\n-nxt_non_void_action(const struct rte_flow_action *cur)\n-{\n-\twhile (1) {\n-\t\tif (cur->type != RTE_FLOW_ACTION_TYPE_VOID)\n-\t\t\treturn cur;\n-\t\tcur++;\n-\t}\n-}\n-\n-static int\n-bnxt_filter_type_check(const struct rte_flow_item pattern[],\n-\t\t       struct rte_flow_error *error __rte_unused)\n-{\n-\tconst struct rte_flow_item *item = nxt_non_void_pattern(pattern);\n-\tint use_ntuple = 1;\n-\n-\twhile (item->type != RTE_FLOW_ITEM_TYPE_END) {\n-\t\tswitch (item->type) {\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\tuse_ntuple = 1;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n-\t\t\tuse_ntuple = 0;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n-\t\t\t/* FALLTHROUGH */\n-\t\t\t/* need ntuple match, reset exact match */\n-\t\t\tif (!use_ntuple) {\n-\t\t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t\t\"VLAN flow cannot use NTUPLE filter\\n\");\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"Cannot use VLAN with NTUPLE\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tuse_ntuple |= 1;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tPMD_DRV_LOG(ERR, \"Unknown Flow type\");\n-\t\t\tuse_ntuple |= 1;\n-\t\t}\n-\t\titem++;\n-\t}\n-\treturn use_ntuple;\n-}\n-\n-static int\n-bnxt_validate_and_parse_flow_type(struct bnxt *bp,\n-\t\t\t\t  const struct rte_flow_attr *attr,\n-\t\t\t\t  const struct rte_flow_item pattern[],\n-\t\t\t\t  struct rte_flow_error *error,\n-\t\t\t\t  struct bnxt_filter_info 
*filter)\n-{\n-\tconst struct rte_flow_item *item = nxt_non_void_pattern(pattern);\n-\tconst struct rte_flow_item_vlan *vlan_spec, *vlan_mask;\n-\tconst struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;\n-\tconst struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;\n-\tconst struct rte_flow_item_tcp *tcp_spec, *tcp_mask;\n-\tconst struct rte_flow_item_udp *udp_spec, *udp_mask;\n-\tconst struct rte_flow_item_eth *eth_spec, *eth_mask;\n-\tconst struct rte_flow_item_nvgre *nvgre_spec;\n-\tconst struct rte_flow_item_nvgre *nvgre_mask;\n-\tconst struct rte_flow_item_vxlan *vxlan_spec;\n-\tconst struct rte_flow_item_vxlan *vxlan_mask;\n-\tuint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};\n-\tuint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};\n-\tconst struct rte_flow_item_vf *vf_spec;\n-\tuint32_t tenant_id_be = 0;\n-\tbool vni_masked = 0;\n-\tbool tni_masked = 0;\n-\tuint32_t vf = 0;\n-\tint use_ntuple;\n-\tuint32_t en = 0;\n-\tuint32_t en_ethertype;\n-\tint dflt_vnic;\n-\n-\tuse_ntuple = bnxt_filter_type_check(pattern, error);\n-\tPMD_DRV_LOG(DEBUG, \"Use NTUPLE %d\\n\", use_ntuple);\n-\tif (use_ntuple < 0)\n-\t\treturn use_ntuple;\n-\n-\tfilter->filter_type = use_ntuple ?\n-\t\tHWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;\n-\ten_ethertype = use_ntuple ?\n-\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :\n-\t\tEM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;\n-\n-\twhile (item->type != RTE_FLOW_ITEM_TYPE_END) {\n-\t\tif (item->last) {\n-\t\t\t/* last or range is NOT supported as match criteria */\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"No support for range\");\n-\t\t\treturn -rte_errno;\n-\t\t}\n-\t\tif (!item->spec || !item->mask) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"spec/mask is NULL\");\n-\t\t\treturn -rte_errno;\n-\t\t}\n-\t\tswitch (item->type) {\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\teth_spec = item->spec;\n-\t\t\teth_mask = item->mask;\n-\n-\t\t\t/* Source MAC address mask cannot be partially set.\n-\t\t\t * Should be All 0's or all 1's.\n-\t\t\t * Destination MAC address mask must not be partially\n-\t\t\t * set. Should be all 1's or all 0's.\n-\t\t\t */\n-\t\t\tif ((!is_zero_ether_addr(&eth_mask->src) &&\n-\t\t\t     !is_broadcast_ether_addr(&eth_mask->src)) ||\n-\t\t\t    (!is_zero_ether_addr(&eth_mask->dst) &&\n-\t\t\t     !is_broadcast_ether_addr(&eth_mask->dst))) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"MAC_addr mask not valid\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\t/* Mask is not allowed. 
Only exact matches are */\n-\t\t\tif (eth_mask->type &&\n-\t\t\t    eth_mask->type != RTE_BE16(0xffff)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"ethertype mask not valid\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (is_broadcast_ether_addr(&eth_mask->dst)) {\n-\t\t\t\trte_memcpy(filter->dst_macaddr,\n-\t\t\t\t\t   &eth_spec->dst, 6);\n-\t\t\t\ten |= use_ntuple ?\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;\n-\t\t\t}\n-\t\t\tif (is_broadcast_ether_addr(&eth_mask->src)) {\n-\t\t\t\trte_memcpy(filter->src_macaddr,\n-\t\t\t\t\t   &eth_spec->src, 6);\n-\t\t\t\ten |= use_ntuple ?\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;\n-\t\t\t} /*\n-\t\t\t   * else {\n-\t\t\t   *  RTE_LOG(ERR, PMD, \"Handle this condition\\n\");\n-\t\t\t   * }\n-\t\t\t   */\n-\t\t\tif (eth_mask->type) {\n-\t\t\t\tfilter->ethertype =\n-\t\t\t\t\trte_be_to_cpu_16(eth_spec->type);\n-\t\t\t\ten |= en_ethertype;\n-\t\t\t}\n-\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n-\t\t\tvlan_spec = item->spec;\n-\t\t\tvlan_mask = item->mask;\n-\t\t\tif (en & en_ethertype) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"VLAN TPID matching is not\"\n-\t\t\t\t\t\t   \" supported\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (vlan_mask->tci &&\n-\t\t\t    vlan_mask->tci == RTE_BE16(0x0fff)) {\n-\t\t\t\t/* Only the VLAN ID can be matched. */\n-\t\t\t\tfilter->l2_ovlan =\n-\t\t\t\t\trte_be_to_cpu_16(vlan_spec->tci &\n-\t\t\t\t\t\t\t RTE_BE16(0x0fff));\n-\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;\n-\t\t\t} else if (vlan_mask->tci) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"VLAN mask is invalid\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (vlan_mask->inner_type &&\n-\t\t\t    vlan_mask->inner_type != RTE_BE16(0xffff)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"inner ethertype mask not\"\n-\t\t\t\t\t\t   \" valid\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tif (vlan_mask->inner_type) {\n-\t\t\t\tfilter->ethertype =\n-\t\t\t\t\trte_be_to_cpu_16(vlan_spec->inner_type);\n-\t\t\t\ten |= en_ethertype;\n-\t\t\t}\n-\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\t\t/* If mask is not involved, we could use EM filters. */\n-\t\t\tipv4_spec = item->spec;\n-\t\t\tipv4_mask = item->mask;\n-\t\t\t/* Only IP DST and SRC fields are maskable. 
*/\n-\t\t\tif (ipv4_mask->hdr.version_ihl ||\n-\t\t\t    ipv4_mask->hdr.type_of_service ||\n-\t\t\t    ipv4_mask->hdr.total_length ||\n-\t\t\t    ipv4_mask->hdr.packet_id ||\n-\t\t\t    ipv4_mask->hdr.fragment_offset ||\n-\t\t\t    ipv4_mask->hdr.time_to_live ||\n-\t\t\t    ipv4_mask->hdr.next_proto_id ||\n-\t\t\t    ipv4_mask->hdr.hdr_checksum) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid IPv4 mask.\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tfilter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;\n-\t\t\tfilter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;\n-\t\t\tif (use_ntuple)\n-\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;\n-\t\t\telse\n-\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;\n-\t\t\tif (ipv4_mask->hdr.src_addr) {\n-\t\t\t\tfilter->src_ipaddr_mask[0] =\n-\t\t\t\t\tipv4_mask->hdr.src_addr;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;\n-\t\t\t}\n-\t\t\tif (ipv4_mask->hdr.dst_addr) {\n-\t\t\t\tfilter->dst_ipaddr_mask[0] =\n-\t\t\t\t\tipv4_mask->hdr.dst_addr;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;\n-\t\t\t}\n-\t\t\tfilter->ip_addr_type = use_ntuple ?\n-\t\t\t HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :\n-\t\t\t HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;\n-\t\t\tif (ipv4_spec->hdr.next_proto_id) {\n-\t\t\t\tfilter->ip_protocol =\n-\t\t\t\t\tipv4_spec->hdr.next_proto_id;\n-\t\t\t\tif (use_ntuple)\n-\t\t\t\t\ten |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;\n-\t\t\t\telse\n-\t\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n-\t\t\tipv6_spec = item->spec;\n-\t\t\tipv6_mask = item->mask;\n-\n-\t\t\t/* Only IP DST and SRC fields are maskable. */\n-\t\t\tif (ipv6_mask->hdr.vtc_flow ||\n-\t\t\t    ipv6_mask->hdr.payload_len ||\n-\t\t\t    ipv6_mask->hdr.proto ||\n-\t\t\t    ipv6_mask->hdr.hop_limits) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid IPv6 mask.\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (use_ntuple)\n-\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;\n-\t\t\telse\n-\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;\n-\t\t\trte_memcpy(filter->src_ipaddr,\n-\t\t\t\t   ipv6_spec->hdr.src_addr, 16);\n-\t\t\trte_memcpy(filter->dst_ipaddr,\n-\t\t\t\t   ipv6_spec->hdr.dst_addr, 16);\n-\t\t\tif (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,\n-\t\t\t\t\t\t   16)) {\n-\t\t\t\trte_memcpy(filter->src_ipaddr_mask,\n-\t\t\t\t\t   ipv6_mask->hdr.src_addr, 16);\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;\n-\t\t\t}\n-\t\t\tif (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,\n-\t\t\t\t\t\t   16)) {\n-\t\t\t\trte_memcpy(filter->dst_ipaddr_mask,\n-\t\t\t\t\t   ipv6_mask->hdr.dst_addr, 16);\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;\n-\t\t\t}\n-\t\t\tfilter->ip_addr_type = use_ntuple ?\n-\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :\n-\t\t\t\tEM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n-\t\t\ttcp_spec = item->spec;\n-\t\t\ttcp_mask = item->mask;\n-\n-\t\t\t/* Check TCP mask. 
Only DST & SRC ports are maskable */\n-\t\t\tif (tcp_mask->hdr.sent_seq ||\n-\t\t\t    tcp_mask->hdr.recv_ack ||\n-\t\t\t    tcp_mask->hdr.data_off ||\n-\t\t\t    tcp_mask->hdr.tcp_flags ||\n-\t\t\t    tcp_mask->hdr.rx_win ||\n-\t\t\t    tcp_mask->hdr.cksum ||\n-\t\t\t    tcp_mask->hdr.tcp_urp) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid TCP mask\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tfilter->src_port = tcp_spec->hdr.src_port;\n-\t\t\tfilter->dst_port = tcp_spec->hdr.dst_port;\n-\t\t\tif (use_ntuple)\n-\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;\n-\t\t\telse\n-\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_PORT;\n-\t\t\tif (tcp_mask->hdr.dst_port) {\n-\t\t\t\tfilter->dst_port_mask = tcp_mask->hdr.dst_port;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;\n-\t\t\t}\n-\t\t\tif (tcp_mask->hdr.src_port) {\n-\t\t\t\tfilter->src_port_mask = tcp_mask->hdr.src_port;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n-\t\t\tudp_spec = item->spec;\n-\t\t\tudp_mask = item->mask;\n-\n-\t\t\tif (udp_mask->hdr.dgram_len ||\n-\t\t\t    udp_mask->hdr.dgram_cksum) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid UDP mask\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tfilter->src_port = udp_spec->hdr.src_port;\n-\t\t\tfilter->dst_port = udp_spec->hdr.dst_port;\n-\t\t\tif (use_ntuple)\n-\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |\n-\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;\n-\t\t\telse\n-\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |\n-\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_PORT;\n-\n-\t\t\tif (udp_mask->hdr.dst_port) {\n-\t\t\t\tfilter->dst_port_mask = udp_mask->hdr.dst_port;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;\n-\t\t\t}\n-\t\t\tif (udp_mask->hdr.src_port) {\n-\t\t\t\tfilter->src_port_mask = udp_mask->hdr.src_port;\n-\t\t\t\ten |= !use_ntuple ? 0 :\n-\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n-\t\t\tvxlan_spec = item->spec;\n-\t\t\tvxlan_mask = item->mask;\n-\t\t\t/* Check if VXLAN item is used to describe protocol.\n-\t\t\t * If yes, both spec and mask should be NULL.\n-\t\t\t * If no, both spec and mask shouldn't be NULL.\n-\t\t\t */\n-\t\t\tif ((!vxlan_spec && vxlan_mask) ||\n-\t\t\t    (vxlan_spec && !vxlan_mask)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid VXLAN item\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||\n-\t\t\t    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||\n-\t\t\t    vxlan_spec->flags != 0x8) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid VXLAN item\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\t/* Check if VNI is masked. 
*/\n-\t\t\tif (vxlan_spec && vxlan_mask) {\n-\t\t\t\tvni_masked =\n-\t\t\t\t\t!!memcmp(vxlan_mask->vni, vni_mask,\n-\t\t\t\t\t\t RTE_DIM(vni_mask));\n-\t\t\t\tif (vni_masked) {\n-\t\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t   item,\n-\t\t\t\t\t\t   \"Invalid VNI mask\");\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\t}\n-\n-\t\t\t\trte_memcpy(((uint8_t *)&tenant_id_be + 1),\n-\t\t\t\t\t   vxlan_spec->vni, 3);\n-\t\t\t\tfilter->vni =\n-\t\t\t\t\trte_be_to_cpu_32(tenant_id_be);\n-\t\t\t\tfilter->tunnel_type =\n-\t\t\t\t CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n-\t\t\tnvgre_spec = item->spec;\n-\t\t\tnvgre_mask = item->mask;\n-\t\t\t/* Check if NVGRE item is used to describe protocol.\n-\t\t\t * If yes, both spec and mask should be NULL.\n-\t\t\t * If no, both spec and mask shouldn't be NULL.\n-\t\t\t */\n-\t\t\tif ((!nvgre_spec && nvgre_mask) ||\n-\t\t\t    (nvgre_spec && !nvgre_mask)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid NVGRE item\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||\n-\t\t\t    nvgre_spec->protocol != 0x6558) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Invalid NVGRE item\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (nvgre_spec && nvgre_mask) {\n-\t\t\t\ttni_masked =\n-\t\t\t\t\t!!memcmp(nvgre_mask->tni, tni_mask,\n-\t\t\t\t\t\t RTE_DIM(tni_mask));\n-\t\t\t\tif (tni_masked) {\n-\t\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\t       RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t       item,\n-\t\t\t\t\t\t       \"Invalid TNI mask\");\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\t}\n-\t\t\t\trte_memcpy(((uint8_t *)&tenant_id_be + 1),\n-\t\t\t\t\t   nvgre_spec->tni, 3);\n-\t\t\t\tfilter->vni =\n-\t\t\t\t\trte_be_to_cpu_32(tenant_id_be);\n-\t\t\t\tfilter->tunnel_type =\n-\t\t\t\t CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VF:\n-\t\t\tvf_spec = item->spec;\n-\t\t\tvf = vf_spec->id;\n-\t\t\tif (!BNXT_PF(bp)) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Configuring on a VF!\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (vf >= bp->pdev->max_vfs) {\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Incorrect VF id!\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tif (!attr->transfer) {\n-\t\t\t\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Matching VF traffic without\"\n-\t\t\t\t\t   \" affecting it (transfer attribute)\"\n-\t\t\t\t\t   \" is unsupported\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tfilter->mirror_vnic_id =\n-\t\t\tdflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);\n-\t\t\tif (dflt_vnic < 0) {\n-\t\t\t\t/* This simply indicates there's no driver\n-\t\t\t\t * loaded. 
This is not an error.\n-\t\t\t\t */\n-\t\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t   item,\n-\t\t\t\t\t   \"Unable to get default VNIC for VF\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\t\t\tfilter->mirror_vnic_id = dflt_vnic;\n-\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tbreak;\n-\t\t}\n-\t\titem++;\n-\t}\n-\tfilter->enables = en;\n-\n-\treturn 0;\n-}\n-\n-/* Parse attributes */\n-static int\n-bnxt_flow_parse_attr(const struct rte_flow_attr *attr,\n-\t\t     struct rte_flow_error *error)\n-{\n-\t/* Must be input direction */\n-\tif (!attr->ingress) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n-\t\t\t\t   attr, \"Only support ingress.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\t/* Not supported */\n-\tif (attr->egress) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n-\t\t\t\t   attr, \"No support for egress.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\t/* Not supported */\n-\tif (attr->priority) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n-\t\t\t\t   attr, \"No support for priority.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\t/* Not supported */\n-\tif (attr->group) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n-\t\t\t\t   attr, \"No support for group.\");\n-\t\treturn -rte_errno;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-struct bnxt_filter_info *\n-bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,\n-\t\t   struct bnxt_vnic_info *vnic)\n-{\n-\tstruct bnxt_filter_info *filter1, *f0;\n-\tstruct bnxt_vnic_info *vnic0;\n-\tint rc;\n-\n-\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n-\tf0 = STAILQ_FIRST(&vnic0->filter);\n-\n-\t//This flow has same DST MAC as the port/l2 filter.\n-\tif (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)\n-\t\treturn f0;\n-\n-\t//This flow needs DST MAC which is not same as port/l2\n-\tPMD_DRV_LOG(DEBUG, \"Create L2 filter for DST MAC\\n\");\n-\tfilter1 = bnxt_get_unused_filter(bp);\n-\tif (filter1 == NULL)\n-\t\treturn NULL;\n-\tfilter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;\n-\tfilter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n-\t\t\tL2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;\n-\tmemcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);\n-\tmemset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n-\trc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,\n-\t\t\t\t     filter1);\n-\tif (rc) {\n-\t\tbnxt_free_filter(bp, filter1);\n-\t\treturn NULL;\n-\t}\n-\treturn filter1;\n-}\n-\n-static int\n-bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,\n-\t\t\t     const struct rte_flow_item pattern[],\n-\t\t\t     const struct rte_flow_action actions[],\n-\t\t\t     const struct rte_flow_attr *attr,\n-\t\t\t     struct rte_flow_error *error,\n-\t\t\t     struct bnxt_filter_info *filter)\n-{\n-\tconst struct rte_flow_action *act = nxt_non_void_action(actions);\n-\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n-\tconst struct rte_flow_action_queue *act_q;\n-\tconst struct rte_flow_action_vf *act_vf;\n-\tstruct bnxt_vnic_info *vnic, *vnic0;\n-\tstruct bnxt_filter_info *filter1;\n-\tuint32_t vf = 0;\n-\tint dflt_vnic;\n-\tint rc;\n-\n-\tif (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {\n-\t\tPMD_DRV_LOG(ERR, \"Cannot create flow on RSS queues\\n\");\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 
NULL,\n-\t\t\t\t   \"Cannot create flow on RSS queues\");\n-\t\trc = -rte_errno;\n-\t\tgoto ret;\n-\t}\n-\n-\trc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,\n-\t\t\t\t\t       filter);\n-\tif (rc != 0)\n-\t\tgoto ret;\n-\n-\trc = bnxt_flow_parse_attr(attr, error);\n-\tif (rc != 0)\n-\t\tgoto ret;\n-\t//Since we support ingress attribute only - right now.\n-\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n-\t\tfilter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;\n-\n-\tswitch (act->type) {\n-\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n-\t\t/* Allow this flow. Redirect to a VNIC. */\n-\t\tact_q = (const struct rte_flow_action_queue *)act->conf;\n-\t\tif (act_q->index >= bp->rx_nr_rings) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, act,\n-\t\t\t\t\t   \"Invalid queue ID.\");\n-\t\t\trc = -rte_errno;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tPMD_DRV_LOG(DEBUG, \"Queue index %d\\n\", act_q->index);\n-\n-\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n-\t\tvnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);\n-\t\tif (vnic == NULL) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, act,\n-\t\t\t\t\t   \"No matching VNIC for queue ID.\");\n-\t\t\trc = -rte_errno;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->dst_id = vnic->fw_vnic_id;\n-\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic);\n-\t\tif (filter1 == NULL) {\n-\t\t\trc = -ENOSPC;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n-\t\tPMD_DRV_LOG(DEBUG, \"VNIC found\\n\");\n-\t\tbreak;\n-\tcase RTE_FLOW_ACTION_TYPE_DROP:\n-\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n-\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n-\t\tif (filter1 == NULL) {\n-\t\t\trc = -ENOSPC;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n-\t\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n-\t\t\tfilter->flags =\n-\t\t\t\tHWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;\n-\t\telse\n-\t\t\tfilter->flags =\n-\t\t\t\tHWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;\n-\t\tbreak;\n-\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n-\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n-\t\tif (filter1 == NULL) {\n-\t\t\trc = -ENOSPC;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n-\t\tfilter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;\n-\t\tbreak;\n-\tcase RTE_FLOW_ACTION_TYPE_VF:\n-\t\tact_vf = (const struct rte_flow_action_vf *)act->conf;\n-\t\tvf = act_vf->id;\n-\t\tif (!BNXT_PF(bp)) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t   act,\n-\t\t\t\t   \"Configuring on a VF!\");\n-\t\t\trc = -rte_errno;\n-\t\t\tgoto ret;\n-\t\t}\n-\n-\t\tif (vf >= bp->pdev->max_vfs) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t   act,\n-\t\t\t\t   \"Incorrect VF id!\");\n-\t\t\trc = -rte_errno;\n-\t\t\tgoto ret;\n-\t\t}\n-\n-\t\tfilter->mirror_vnic_id =\n-\t\tdflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);\n-\t\tif (dflt_vnic < 0) {\n-\t\t\t/* This simply indicates there's no driver loaded.\n-\t\t\t * This is not an error.\n-\t\t\t */\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t   act,\n-\t\t\t\t   \"Unable to get default VNIC for VF\");\n-\t\t\trc = -rte_errno;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->mirror_vnic_id = dflt_vnic;\n-\t\tfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;\n-\n-\t\tvnic0 = 
STAILQ_FIRST(&bp->ff_pool[0]);\n-\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n-\t\tif (filter1 == NULL) {\n-\t\t\trc = -ENOSPC;\n-\t\t\tgoto ret;\n-\t\t}\n-\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n-\t\tbreak;\n-\n-\tdefault:\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, act,\n-\t\t\t\t   \"Invalid action.\");\n-\t\trc = -rte_errno;\n-\t\tgoto ret;\n-\t}\n-\n-\tact = nxt_non_void_action(++act);\n-\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n-\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t   act, \"Invalid action.\");\n-\t\trc = -rte_errno;\n-\t\tgoto ret;\n-\t}\n-ret:\n-\treturn rc;\n-}\n-\n-static int\n-bnxt_flow_validate(struct rte_eth_dev *dev,\n-\t\tconst struct rte_flow_attr *attr,\n-\t\tconst struct rte_flow_item pattern[],\n-\t\tconst struct rte_flow_action actions[],\n-\t\tstruct rte_flow_error *error)\n-{\n-\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n-\tstruct bnxt_filter_info *filter;\n-\tint ret = 0;\n-\n-\tret = bnxt_flow_agrs_validate(attr, pattern, actions, error);\n-\tif (ret != 0)\n-\t\treturn ret;\n-\n-\tfilter = bnxt_get_unused_filter(bp);\n-\tif (filter == NULL) {\n-\t\tPMD_DRV_LOG(ERR, \"Not enough resources for a new flow.\\n\");\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,\n-\t\t\t\t\t   error, filter);\n-\t/* No need to hold on to this filter if we are just validating flow */\n-\tfilter->fw_l2_filter_id = UINT64_MAX;\n-\tbnxt_free_filter(bp, filter);\n-\n-\treturn ret;\n-}\n-\n-static int\n-bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)\n-{\n-\tstruct bnxt_filter_info *mf;\n-\tstruct rte_flow *flow;\n-\tint i;\n-\n-\tfor (i = bp->nr_vnics - 1; i >= 0; i--) {\n-\t\tstruct bnxt_vnic_info *vnic = &bp->vnic_info[i];\n-\n-\t\tSTAILQ_FOREACH(flow, &vnic->flow_list, next) {\n-\t\t\tmf = flow->filter;\n-\n-\t\t\tif (mf->filter_type == nf->filter_type &&\n-\t\t\t    mf->flags == nf->flags &&\n-\t\t\t    mf->src_port == nf->src_port &&\n-\t\t\t    mf->src_port_mask == nf->src_port_mask &&\n-\t\t\t    mf->dst_port == nf->dst_port &&\n-\t\t\t    mf->dst_port_mask == nf->dst_port_mask &&\n-\t\t\t    mf->ip_protocol == nf->ip_protocol &&\n-\t\t\t    mf->ip_addr_type == nf->ip_addr_type &&\n-\t\t\t    mf->ethertype == nf->ethertype &&\n-\t\t\t    mf->vni == nf->vni &&\n-\t\t\t    mf->tunnel_type == nf->tunnel_type &&\n-\t\t\t    mf->l2_ovlan == nf->l2_ovlan &&\n-\t\t\t    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&\n-\t\t\t    mf->l2_ivlan == nf->l2_ivlan &&\n-\t\t\t    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&\n-\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&\n-\t\t\t    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n-\t\t\t    !memcmp(mf->src_macaddr, nf->src_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n-\t\t\t    !memcmp(mf->dst_macaddr, nf->dst_macaddr,\n-\t\t\t\t    ETHER_ADDR_LEN) &&\n-\t\t\t    !memcmp(mf->src_ipaddr, nf->src_ipaddr,\n-\t\t\t\t    sizeof(nf->src_ipaddr)) &&\n-\t\t\t    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,\n-\t\t\t\t    sizeof(nf->src_ipaddr_mask)) &&\n-\t\t\t    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,\n-\t\t\t\t    sizeof(nf->dst_ipaddr)) &&\n-\t\t\t    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,\n-\t\t\t\t    sizeof(nf->dst_ipaddr_mask))) {\n-\t\t\t\tif (mf->dst_id == nf->dst_id)\n-\t\t\t\t\treturn -EEXIST;\n-\t\t\t\t/* Same Flow, Different queue\n-\t\t\t\t * Clear the old ntuple filter\n-\t\t\t\t 
*/\n-\t\t\t\tif (nf->filter_type == HWRM_CFA_EM_FILTER)\n-\t\t\t\t\tbnxt_hwrm_clear_em_filter(bp, mf);\n-\t\t\t\tif (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)\n-\t\t\t\t\tbnxt_hwrm_clear_ntuple_filter(bp, mf);\n-\t\t\t\t/* Free the old filter, update flow\n-\t\t\t\t * with new filter\n-\t\t\t\t */\n-\t\t\t\tbnxt_free_filter(bp, mf);\n-\t\t\t\tflow->filter = nf;\n-\t\t\t\treturn -EXDEV;\n-\t\t\t}\n-\t\t}\n-\t}\n-\treturn 0;\n-}\n-\n-static struct rte_flow *\n-bnxt_flow_create(struct rte_eth_dev *dev,\n-\t\t  const struct rte_flow_attr *attr,\n-\t\t  const struct rte_flow_item pattern[],\n-\t\t  const struct rte_flow_action actions[],\n-\t\t  struct rte_flow_error *error)\n-{\n-\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n-\tstruct bnxt_filter_info *filter;\n-\tstruct bnxt_vnic_info *vnic = NULL;\n-\tbool update_flow = false;\n-\tstruct rte_flow *flow;\n-\tunsigned int i;\n-\tint ret = 0;\n-\n-\tflow = rte_zmalloc(\"bnxt_flow\", sizeof(struct rte_flow), 0);\n-\tif (!flow) {\n-\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t   \"Failed to allocate memory\");\n-\t\treturn flow;\n-\t}\n-\n-\tret = bnxt_flow_agrs_validate(attr, pattern, actions, error);\n-\tif (ret != 0) {\n-\t\tPMD_DRV_LOG(ERR, \"Not a validate flow.\\n\");\n-\t\tgoto free_flow;\n-\t}\n-\n-\tfilter = bnxt_get_unused_filter(bp);\n-\tif (filter == NULL) {\n-\t\tPMD_DRV_LOG(ERR, \"Not enough resources for a new flow.\\n\");\n-\t\tgoto free_flow;\n-\t}\n-\n-\tret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,\n-\t\t\t\t\t   error, filter);\n-\tif (ret != 0)\n-\t\tgoto free_filter;\n-\n-\tret = bnxt_match_filter(bp, filter);\n-\tif (ret == -EEXIST) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Flow already exists.\\n\");\n-\t\t/* Clear the filter that was created as part of\n-\t\t * validate_and_parse_flow() above\n-\t\t */\n-\t\tbnxt_hwrm_clear_l2_filter(bp, filter);\n-\t\tgoto free_filter;\n-\t} else if (ret == -EXDEV) {\n-\t\tPMD_DRV_LOG(DEBUG, \"Flow with same pattern exists\");\n-\t\tPMD_DRV_LOG(DEBUG, \"Updating with different destination\\n\");\n-\t\tupdate_flow = true;\n-\t}\n-\n-\tif (filter->filter_type == HWRM_CFA_EM_FILTER) {\n-\t\tfilter->enables |=\n-\t\t\tHWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;\n-\t\tret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);\n-\t}\n-\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {\n-\t\tfilter->enables |=\n-\t\t\tHWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;\n-\t\tret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);\n-\t}\n-\n-\tfor (i = 0; i < bp->nr_vnics; i++) {\n-\t\tvnic = &bp->vnic_info[i];\n-\t\tif (filter->dst_id == vnic->fw_vnic_id)\n-\t\t\tbreak;\n-\t}\n-\n-\tif (!ret) {\n-\t\tflow->filter = filter;\n-\t\tflow->vnic = vnic;\n-\t\tif (update_flow) {\n-\t\t\tret = -EXDEV;\n-\t\t\tgoto free_flow;\n-\t\t}\n-\t\tPMD_DRV_LOG(ERR, \"Successfully created flow.\\n\");\n-\t\tSTAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);\n-\t\treturn flow;\n-\t}\n-free_filter:\n-\tbnxt_free_filter(bp, filter);\n-free_flow:\n-\tif (ret == -EEXIST)\n-\t\trte_flow_error_set(error, ret,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t   \"Matching Flow exists.\");\n-\telse if (ret == -EXDEV)\n-\t\trte_flow_error_set(error, ret,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t   \"Flow with pattern exists, updating destination queue\");\n-\telse\n-\t\trte_flow_error_set(error, -ret,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t   \"Failed to create 
flow.\");\n-\trte_free(flow);\n-\tflow = NULL;\n-\treturn flow;\n-}\n-\n-static int\n-bnxt_flow_destroy(struct rte_eth_dev *dev,\n-\t\t  struct rte_flow *flow,\n-\t\t  struct rte_flow_error *error)\n-{\n-\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n-\tstruct bnxt_filter_info *filter = flow->filter;\n-\tstruct bnxt_vnic_info *vnic = flow->vnic;\n-\tint ret = 0;\n-\n-\tret = bnxt_match_filter(bp, filter);\n-\tif (ret == 0)\n-\t\tPMD_DRV_LOG(ERR, \"Could not find matching flow\\n\");\n-\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n-\t\tret = bnxt_hwrm_clear_em_filter(bp, filter);\n-\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)\n-\t\tret = bnxt_hwrm_clear_ntuple_filter(bp, filter);\n-\telse\n-\t\tret = bnxt_hwrm_clear_l2_filter(bp, filter);\n-\tif (!ret) {\n-\t\tSTAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);\n-\t\trte_free(flow);\n-\t} else {\n-\t\trte_flow_error_set(error, -ret,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t   \"Failed to destroy flow.\");\n-\t}\n-\n-\treturn ret;\n-}\n-\n-static int\n-bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)\n-{\n-\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n-\tstruct bnxt_vnic_info *vnic;\n-\tstruct rte_flow *flow;\n-\tunsigned int i;\n-\tint ret = 0;\n-\n-\tfor (i = 0; i < bp->nr_vnics; i++) {\n-\t\tvnic = &bp->vnic_info[i];\n-\t\tSTAILQ_FOREACH(flow, &vnic->flow_list, next) {\n-\t\t\tstruct bnxt_filter_info *filter = flow->filter;\n-\n-\t\t\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n-\t\t\t\tret = bnxt_hwrm_clear_em_filter(bp, filter);\n-\t\t\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)\n-\t\t\t\tret = bnxt_hwrm_clear_ntuple_filter(bp, filter);\n-\n-\t\t\tif (ret) {\n-\t\t\t\trte_flow_error_set(error, -ret,\n-\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE,\n-\t\t\t\t\t\t   NULL,\n-\t\t\t\t\t\t   \"Failed to flush flow in HW.\");\n-\t\t\t\treturn -rte_errno;\n-\t\t\t}\n-\n-\t\t\tSTAILQ_REMOVE(&vnic->flow_list, flow,\n-\t\t\t\t      rte_flow, next);\n-\t\t\trte_free(flow);\n-\t\t}\n-\t}\n-\n-\treturn ret;\n-}\n-\n-const struct rte_flow_ops bnxt_flow_ops = {\n-\t.validate = bnxt_flow_validate,\n-\t.create = bnxt_flow_create,\n-\t.destroy = bnxt_flow_destroy,\n-\t.flush = bnxt_flow_flush,\n-};\ndiff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c\nnew file mode 100644\nindex 000000000..a491e9dbf\n--- /dev/null\n+++ b/drivers/net/bnxt/bnxt_flow.c\n@@ -0,0 +1,1167 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2014-2018 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#include <sys/queue.h>\n+\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_flow.h>\n+#include <rte_flow_driver.h>\n+#include <rte_tailq.h>\n+\n+#include \"bnxt.h\"\n+#include \"bnxt_filter.h\"\n+#include \"bnxt_hwrm.h\"\n+#include \"bnxt_vnic.h\"\n+#include \"bnxt_util.h\"\n+#include \"hsi_struct_def_dpdk.h\"\n+\n+static int\n+bnxt_flow_args_validate(const struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"NULL pattern.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"NULL action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!attr) 
{\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"NULL attribute.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static const struct rte_flow_item *\n+bnxt_flow_non_void_item(const struct rte_flow_item *cur)\n+{\n+\twhile (1) {\n+\t\tif (cur->type != RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\treturn cur;\n+\t\tcur++;\n+\t}\n+}\n+\n+static const struct rte_flow_action *\n+bnxt_flow_non_void_action(const struct rte_flow_action *cur)\n+{\n+\twhile (1) {\n+\t\tif (cur->type != RTE_FLOW_ACTION_TYPE_VOID)\n+\t\t\treturn cur;\n+\t\tcur++;\n+\t}\n+}\n+\n+static int\n+bnxt_filter_type_check(const struct rte_flow_item pattern[],\n+\t\t       struct rte_flow_error *error __rte_unused)\n+{\n+\tconst struct rte_flow_item *item =\n+\t\tbnxt_flow_non_void_item(pattern);\n+\tint use_ntuple = 1;\n+\n+\twhile (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tswitch (item->type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\tuse_ntuple = 1;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\t\tuse_ntuple = 0;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\t/* FALLTHROUGH */\n+\t\t\t/* need ntuple match, reset exact match */\n+\t\t\tif (!use_ntuple) {\n+\t\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\t\"VLAN flow cannot use NTUPLE filter\\n\");\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error,\n+\t\t\t\t\t EINVAL,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t item,\n+\t\t\t\t\t \"Cannot use VLAN with NTUPLE\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\tuse_ntuple |= 1;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tPMD_DRV_LOG(ERR, \"Unknown Flow type\\n\");\n+\t\t\tuse_ntuple |= 1;\n+\t\t}\n+\t\titem++;\n+\t}\n+\treturn use_ntuple;\n+}\n+\n+static int\n+bnxt_validate_and_parse_flow_type(struct bnxt *bp,\n+\t\t\t\t  const struct rte_flow_attr *attr,\n+\t\t\t\t  const struct rte_flow_item pattern[],\n+\t\t\t\t  struct rte_flow_error *error,\n+\t\t\t\t  struct bnxt_filter_info *filter)\n+{\n+\tconst struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);\n+\tconst struct rte_flow_item_vlan *vlan_spec, *vlan_mask;\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;\n+\tconst struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;\n+\tconst struct rte_flow_item_tcp *tcp_spec, *tcp_mask;\n+\tconst struct rte_flow_item_udp *udp_spec, *udp_mask;\n+\tconst struct rte_flow_item_eth *eth_spec, *eth_mask;\n+\tconst struct rte_flow_item_nvgre *nvgre_spec;\n+\tconst struct rte_flow_item_nvgre *nvgre_mask;\n+\tconst struct rte_flow_item_vxlan *vxlan_spec;\n+\tconst struct rte_flow_item_vxlan *vxlan_mask;\n+\tuint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};\n+\tuint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};\n+\tconst struct rte_flow_item_vf *vf_spec;\n+\tuint32_t tenant_id_be = 0;\n+\tbool vni_masked = 0;\n+\tbool tni_masked = 0;\n+\tuint32_t vf = 0;\n+\tint use_ntuple;\n+\tuint32_t en = 0;\n+\tuint32_t en_ethertype;\n+\tint dflt_vnic;\n+\n+\tuse_ntuple = bnxt_filter_type_check(pattern, error);\n+\tPMD_DRV_LOG(DEBUG, \"Use NTUPLE %d\\n\", use_ntuple);\n+\tif (use_ntuple < 0)\n+\t\treturn use_ntuple;\n+\n+\tfilter->filter_type = use_ntuple ?\n+\t\tHWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;\n+\ten_ethertype = use_ntuple ?\n+\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :\n+\t\tEM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;\n+\n+\twhile (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tif (item->last) {\n+\t\t\t/* last or range is NOT supported as match criteria 
*/\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"No support for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tif (!item->spec || !item->mask) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t   item,\n+\t\t\t\t\t   \"spec/mask is NULL\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tswitch (item->type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\teth_spec = item->spec;\n+\t\t\teth_mask = item->mask;\n+\n+\t\t\t/* Source MAC address mask cannot be partially set.\n+\t\t\t * Should be All 0's or all 1's.\n+\t\t\t * Destination MAC address mask must not be partially\n+\t\t\t * set. Should be all 1's or all 0's.\n+\t\t\t */\n+\t\t\tif ((!is_zero_ether_addr(&eth_mask->src) &&\n+\t\t\t     !is_broadcast_ether_addr(&eth_mask->src)) ||\n+\t\t\t    (!is_zero_ether_addr(&eth_mask->dst) &&\n+\t\t\t     !is_broadcast_ether_addr(&eth_mask->dst))) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"MAC_addr mask not valid\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\t/* Mask is not allowed. Only exact matches are */\n+\t\t\tif (eth_mask->type &&\n+\t\t\t    eth_mask->type != RTE_BE16(0xffff)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"ethertype mask not valid\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (is_broadcast_ether_addr(&eth_mask->dst)) {\n+\t\t\t\trte_memcpy(filter->dst_macaddr,\n+\t\t\t\t\t   &eth_spec->dst, 6);\n+\t\t\t\ten |= use_ntuple ?\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;\n+\t\t\t}\n+\n+\t\t\tif (is_broadcast_ether_addr(&eth_mask->src)) {\n+\t\t\t\trte_memcpy(filter->src_macaddr,\n+\t\t\t\t\t   &eth_spec->src, 6);\n+\t\t\t\ten |= use_ntuple ?\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;\n+\t\t\t} /*\n+\t\t\t   * else {\n+\t\t\t   *  PMD_DRV_LOG(ERR, \"Handle this condition\\n\");\n+\t\t\t   * }\n+\t\t\t   */\n+\t\t\tif (eth_mask->type) {\n+\t\t\t\tfilter->ethertype =\n+\t\t\t\t\trte_be_to_cpu_16(eth_spec->type);\n+\t\t\t\ten |= en_ethertype;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\t\tvlan_spec = item->spec;\n+\t\t\tvlan_mask = item->mask;\n+\t\t\tif (en & en_ethertype) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"VLAN TPID matching is not\"\n+\t\t\t\t\t\t   \" supported\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\tif (vlan_mask->tci &&\n+\t\t\t    vlan_mask->tci == RTE_BE16(0x0fff)) {\n+\t\t\t\t/* Only the VLAN ID can be matched. 
*/\n+\t\t\t\tfilter->l2_ovlan =\n+\t\t\t\t\trte_be_to_cpu_16(vlan_spec->tci &\n+\t\t\t\t\t\t\t RTE_BE16(0x0fff));\n+\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;\n+\t\t\t} else {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"VLAN mask is invalid\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\tif (vlan_mask->inner_type &&\n+\t\t\t    vlan_mask->inner_type != RTE_BE16(0xffff)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"inner ethertype mask not\"\n+\t\t\t\t\t\t   \" valid\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\tif (vlan_mask->inner_type) {\n+\t\t\t\tfilter->ethertype =\n+\t\t\t\t\trte_be_to_cpu_16(vlan_spec->inner_type);\n+\t\t\t\ten |= en_ethertype;\n+\t\t\t}\n+\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\t/* If mask is not involved, we could use EM filters. */\n+\t\t\tipv4_spec = item->spec;\n+\t\t\tipv4_mask = item->mask;\n+\t\t\t/* Only IP DST and SRC fields are maskable. */\n+\t\t\tif (ipv4_mask->hdr.version_ihl ||\n+\t\t\t    ipv4_mask->hdr.type_of_service ||\n+\t\t\t    ipv4_mask->hdr.total_length ||\n+\t\t\t    ipv4_mask->hdr.packet_id ||\n+\t\t\t    ipv4_mask->hdr.fragment_offset ||\n+\t\t\t    ipv4_mask->hdr.time_to_live ||\n+\t\t\t    ipv4_mask->hdr.next_proto_id ||\n+\t\t\t    ipv4_mask->hdr.hdr_checksum) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid IPv4 mask.\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tfilter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;\n+\t\t\tfilter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;\n+\n+\t\t\tif (use_ntuple)\n+\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;\n+\t\t\telse\n+\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;\n+\n+\t\t\tif (ipv4_mask->hdr.src_addr) {\n+\t\t\t\tfilter->src_ipaddr_mask[0] =\n+\t\t\t\t\tipv4_mask->hdr.src_addr;\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;\n+\t\t\t}\n+\n+\t\t\tif (ipv4_mask->hdr.dst_addr) {\n+\t\t\t\tfilter->dst_ipaddr_mask[0] =\n+\t\t\t\t\tipv4_mask->hdr.dst_addr;\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;\n+\t\t\t}\n+\n+\t\t\tfilter->ip_addr_type = use_ntuple ?\n+\t\t\t HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :\n+\t\t\t HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;\n+\n+\t\t\tif (ipv4_spec->hdr.next_proto_id) {\n+\t\t\t\tfilter->ip_protocol =\n+\t\t\t\t\tipv4_spec->hdr.next_proto_id;\n+\t\t\t\tif (use_ntuple)\n+\t\t\t\t\ten |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;\n+\t\t\t\telse\n+\t\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tipv6_spec = item->spec;\n+\t\t\tipv6_mask = item->mask;\n+\n+\t\t\t/* Only IP DST and SRC fields are maskable. 
*/\n+\t\t\tif (ipv6_mask->hdr.vtc_flow ||\n+\t\t\t    ipv6_mask->hdr.payload_len ||\n+\t\t\t    ipv6_mask->hdr.proto ||\n+\t\t\t    ipv6_mask->hdr.hop_limits) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid IPv6 mask.\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (use_ntuple)\n+\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;\n+\t\t\telse\n+\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;\n+\n+\t\t\trte_memcpy(filter->src_ipaddr,\n+\t\t\t\t   ipv6_spec->hdr.src_addr, 16);\n+\t\t\trte_memcpy(filter->dst_ipaddr,\n+\t\t\t\t   ipv6_spec->hdr.dst_addr, 16);\n+\n+\t\t\tif (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,\n+\t\t\t\t\t\t   16)) {\n+\t\t\t\trte_memcpy(filter->src_ipaddr_mask,\n+\t\t\t\t\t   ipv6_mask->hdr.src_addr, 16);\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;\n+\t\t\t}\n+\n+\t\t\tif (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,\n+\t\t\t\t\t\t   16)) {\n+\t\t\t\trte_memcpy(filter->dst_ipaddr_mask,\n+\t\t\t\t\t   ipv6_mask->hdr.dst_addr, 16);\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;\n+\t\t\t}\n+\n+\t\t\tfilter->ip_addr_type = use_ntuple ?\n+\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :\n+\t\t\t\tEM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\ttcp_spec = item->spec;\n+\t\t\ttcp_mask = item->mask;\n+\n+\t\t\t/* Check TCP mask. Only DST & SRC ports are maskable */\n+\t\t\tif (tcp_mask->hdr.sent_seq ||\n+\t\t\t    tcp_mask->hdr.recv_ack ||\n+\t\t\t    tcp_mask->hdr.data_off ||\n+\t\t\t    tcp_mask->hdr.tcp_flags ||\n+\t\t\t    tcp_mask->hdr.rx_win ||\n+\t\t\t    tcp_mask->hdr.cksum ||\n+\t\t\t    tcp_mask->hdr.tcp_urp) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid TCP mask\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tfilter->src_port = tcp_spec->hdr.src_port;\n+\t\t\tfilter->dst_port = tcp_spec->hdr.dst_port;\n+\n+\t\t\tif (use_ntuple)\n+\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;\n+\t\t\telse\n+\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_PORT;\n+\n+\t\t\tif (tcp_mask->hdr.dst_port) {\n+\t\t\t\tfilter->dst_port_mask = tcp_mask->hdr.dst_port;\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;\n+\t\t\t}\n+\n+\t\t\tif (tcp_mask->hdr.src_port) {\n+\t\t\t\tfilter->src_port_mask = tcp_mask->hdr.src_port;\n+\t\t\t\ten |= !use_ntuple ? 
0 :\n+\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tudp_spec = item->spec;\n+\t\t\tudp_mask = item->mask;\n+\n+\t\t\tif (udp_mask->hdr.dgram_len ||\n+\t\t\t    udp_mask->hdr.dgram_cksum) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid UDP mask\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tfilter->src_port = udp_spec->hdr.src_port;\n+\t\t\tfilter->dst_port = udp_spec->hdr.dst_port;\n+\n+\t\t\tif (use_ntuple)\n+\t\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |\n+\t\t\t\t\tNTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;\n+\t\t\telse\n+\t\t\t\ten |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |\n+\t\t\t\t\tEM_FLOW_ALLOC_INPUT_EN_DST_PORT;\n+\n+\t\t\tif (udp_mask->hdr.dst_port) {\n+\t\t\t\tfilter->dst_port_mask = udp_mask->hdr.dst_port;\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;\n+\t\t\t}\n+\n+\t\t\tif (udp_mask->hdr.src_port) {\n+\t\t\t\tfilter->src_port_mask = udp_mask->hdr.src_port;\n+\t\t\t\ten |= !use_ntuple ? 0 :\n+\t\t\t\t  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tvxlan_spec = item->spec;\n+\t\t\tvxlan_mask = item->mask;\n+\t\t\t/* Check if VXLAN item is used to describe protocol.\n+\t\t\t * If yes, both spec and mask should be NULL.\n+\t\t\t * If no, both spec and mask shouldn't be NULL.\n+\t\t\t */\n+\t\t\tif ((!vxlan_spec && vxlan_mask) ||\n+\t\t\t    (vxlan_spec && !vxlan_mask)) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid VXLAN item\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||\n+\t\t\t    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||\n+\t\t\t    vxlan_spec->flags != 0x8) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid VXLAN item\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\t/* Check if VNI is masked. 
*/\n+\t\t\tif (vxlan_spec && vxlan_mask) {\n+\t\t\t\tvni_masked =\n+\t\t\t\t\t!!memcmp(vxlan_mask->vni, vni_mask,\n+\t\t\t\t\t\t RTE_DIM(vni_mask));\n+\t\t\t\tif (vni_masked) {\n+\t\t\t\t\trte_flow_error_set\n+\t\t\t\t\t\t(error,\n+\t\t\t\t\t\t EINVAL,\n+\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t item,\n+\t\t\t\t\t\t \"Invalid VNI mask\");\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\t}\n+\n+\t\t\t\trte_memcpy(((uint8_t *)&tenant_id_be + 1),\n+\t\t\t\t\t   vxlan_spec->vni, 3);\n+\t\t\t\tfilter->vni =\n+\t\t\t\t\trte_be_to_cpu_32(tenant_id_be);\n+\t\t\t\tfilter->tunnel_type =\n+\t\t\t\t CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\t\tnvgre_spec = item->spec;\n+\t\t\tnvgre_mask = item->mask;\n+\t\t\t/* Check if NVGRE item is used to describe protocol.\n+\t\t\t * If yes, both spec and mask should be NULL.\n+\t\t\t * If no, both spec and mask shouldn't be NULL.\n+\t\t\t */\n+\t\t\tif ((!nvgre_spec && nvgre_mask) ||\n+\t\t\t    (nvgre_spec && !nvgre_mask)) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid NVGRE item\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||\n+\t\t\t    nvgre_spec->protocol != 0x6558) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Invalid NVGRE item\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (nvgre_spec && nvgre_mask) {\n+\t\t\t\ttni_masked =\n+\t\t\t\t\t!!memcmp(nvgre_mask->tni, tni_mask,\n+\t\t\t\t\t\t RTE_DIM(tni_mask));\n+\t\t\t\tif (tni_masked) {\n+\t\t\t\t\trte_flow_error_set\n+\t\t\t\t\t\t(error,\n+\t\t\t\t\t\t EINVAL,\n+\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t item,\n+\t\t\t\t\t\t \"Invalid TNI mask\");\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\t}\n+\t\t\t\trte_memcpy(((uint8_t *)&tenant_id_be + 1),\n+\t\t\t\t\t   nvgre_spec->tni, 3);\n+\t\t\t\tfilter->vni =\n+\t\t\t\t\trte_be_to_cpu_32(tenant_id_be);\n+\t\t\t\tfilter->tunnel_type =\n+\t\t\t\t CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VF:\n+\t\t\tvf_spec = item->spec;\n+\t\t\tvf = vf_spec->id;\n+\n+\t\t\tif (!BNXT_PF(bp)) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Configuring on a VF!\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (vf >= bp->pdev->max_vfs) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Incorrect VF id!\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tif (!attr->transfer) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t\t   ENOTSUP,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t   item,\n+\t\t\t\t\t\t   \"Matching VF traffic without\"\n+\t\t\t\t\t\t   \" affecting it (transfer attribute)\"\n+\t\t\t\t\t\t   \" is unsupported\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tfilter->mirror_vnic_id =\n+\t\t\tdflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);\n+\t\t\tif (dflt_vnic < 0) {\n+\t\t\t\t/* This simply indicates there's no driver\n+\t\t\t\t * loaded. 
This is not an error.\n+\t\t\t\t */\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error,\n+\t\t\t\t\t EINVAL,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t item,\n+\t\t\t\t\t \"Unable to get default VNIC for VF\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tfilter->mirror_vnic_id = dflt_vnic;\n+\t\t\ten |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\titem++;\n+\t}\n+\tfilter->enables = en;\n+\n+\treturn 0;\n+}\n+\n+/* Parse attributes */\n+static int\n+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,\n+\t\t     struct rte_flow_error *error)\n+{\n+\t/* Must be input direction */\n+\tif (!attr->ingress) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t   attr,\n+\t\t\t\t   \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->egress) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t   attr,\n+\t\t\t\t   \"No support for egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->priority) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t   attr,\n+\t\t\t\t   \"No support for priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->group) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t   attr,\n+\t\t\t\t   \"No support for group.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct bnxt_filter_info *\n+bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,\n+\t\t   struct bnxt_vnic_info *vnic)\n+{\n+\tstruct bnxt_filter_info *filter1, *f0;\n+\tstruct bnxt_vnic_info *vnic0;\n+\tint rc;\n+\n+\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n+\tf0 = STAILQ_FIRST(&vnic0->filter);\n+\n+\t/* This flow has same DST MAC as the port/l2 filter. 
*/\n+\tif (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)\n+\t\treturn f0;\n+\n+\t/* This flow needs DST MAC which is not same as port/l2 */\n+\tPMD_DRV_LOG(DEBUG, \"Create L2 filter for DST MAC\\n\");\n+\tfilter1 = bnxt_get_unused_filter(bp);\n+\tif (filter1 == NULL)\n+\t\treturn NULL;\n+\n+\tfilter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;\n+\tfilter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |\n+\t\t\tL2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;\n+\tmemcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);\n+\tmemset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);\n+\trc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,\n+\t\t\t\t     filter1);\n+\tif (rc) {\n+\t\tbnxt_free_filter(bp, filter1);\n+\t\treturn NULL;\n+\t}\n+\treturn filter1;\n+}\n+\n+static int\n+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,\n+\t\t\t     const struct rte_flow_item pattern[],\n+\t\t\t     const struct rte_flow_action actions[],\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     struct rte_flow_error *error,\n+\t\t\t     struct bnxt_filter_info *filter)\n+{\n+\tconst struct rte_flow_action *act =\n+\t\tbnxt_flow_non_void_action(actions);\n+\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n+\tconst struct rte_flow_action_queue *act_q;\n+\tconst struct rte_flow_action_vf *act_vf;\n+\tstruct bnxt_vnic_info *vnic, *vnic0;\n+\tstruct bnxt_filter_info *filter1;\n+\tuint32_t vf = 0;\n+\tint dflt_vnic;\n+\tint rc;\n+\n+\tif (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {\n+\t\tPMD_DRV_LOG(ERR, \"Cannot create flow on RSS queues\\n\");\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"Cannot create flow on RSS queues\");\n+\t\trc = -rte_errno;\n+\t\tgoto ret;\n+\t}\n+\n+\trc =\n+\tbnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);\n+\tif (rc != 0)\n+\t\tgoto ret;\n+\n+\trc = bnxt_flow_parse_attr(attr, error);\n+\tif (rc != 0)\n+\t\tgoto ret;\n+\n+\t/* Since we support ingress attribute only - right now. */\n+\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n+\t\tfilter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;\n+\n+\tswitch (act->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t/* Allow this flow. Redirect to a VNIC. 
*/\n+\t\tact_q = (const struct rte_flow_action_queue *)act->conf;\n+\t\tif (act_q->index >= bp->rx_nr_rings) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   act,\n+\t\t\t\t\t   \"Invalid queue ID.\");\n+\t\t\trc = -rte_errno;\n+\t\t\tgoto ret;\n+\t\t}\n+\t\tPMD_DRV_LOG(DEBUG, \"Queue index %d\\n\", act_q->index);\n+\n+\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n+\t\tvnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);\n+\t\tif (vnic == NULL) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   act,\n+\t\t\t\t\t   \"No matching VNIC for queue ID.\");\n+\t\t\trc = -rte_errno;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->dst_id = vnic->fw_vnic_id;\n+\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic);\n+\t\tif (filter1 == NULL) {\n+\t\t\trc = -ENOSPC;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n+\t\tPMD_DRV_LOG(DEBUG, \"VNIC found\\n\");\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n+\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n+\t\tif (filter1 == NULL) {\n+\t\t\trc = -ENOSPC;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n+\t\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n+\t\t\tfilter->flags =\n+\t\t\t\tHWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;\n+\t\telse\n+\t\t\tfilter->flags =\n+\t\t\t\tHWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n+\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n+\t\tif (filter1 == NULL) {\n+\t\t\trc = -ENOSPC;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n+\t\tfilter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_VF:\n+\t\tact_vf = (const struct rte_flow_action_vf *)act->conf;\n+\t\tvf = act_vf->id;\n+\n+\t\tif (!BNXT_PF(bp)) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   act,\n+\t\t\t\t\t   \"Configuring on a VF!\");\n+\t\t\trc = -rte_errno;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tif (vf >= bp->pdev->max_vfs) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   act,\n+\t\t\t\t\t   \"Incorrect VF id!\");\n+\t\t\trc = -rte_errno;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->mirror_vnic_id =\n+\t\tdflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);\n+\t\tif (dflt_vnic < 0) {\n+\t\t\t/* This simply indicates there's no driver loaded.\n+\t\t\t * This is not an error.\n+\t\t\t */\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\t\t   EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   act,\n+\t\t\t\t\t   \"Unable to get default VNIC for VF\");\n+\t\t\trc = -rte_errno;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->mirror_vnic_id = dflt_vnic;\n+\t\tfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;\n+\n+\t\tvnic0 = STAILQ_FIRST(&bp->ff_pool[0]);\n+\t\tfilter1 = bnxt_get_l2_filter(bp, filter, vnic0);\n+\t\tif (filter1 == NULL) {\n+\t\t\trc = -ENOSPC;\n+\t\t\tgoto ret;\n+\t\t}\n+\n+\t\tfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;\n+\t\tbreak;\n+\n+\tdefault:\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   act,\n+\t\t\t\t   \"Invalid action.\");\n+\t\trc = -rte_errno;\n+\t\tgoto ret;\n+\t}\n+\n+\tif (filter1) 
{\n+\t\tbnxt_free_filter(bp, filter1);\n+\t\tfilter1->fw_l2_filter_id = -1;\n+\t}\n+\n+\tact = bnxt_flow_non_void_action(++act);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\trte_flow_error_set(error,\n+\t\t\t\t   EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   act,\n+\t\t\t\t   \"Invalid action.\");\n+\t\trc = -rte_errno;\n+\t\tgoto ret;\n+\t}\n+ret:\n+\treturn rc;\n+}\n+\n+static int\n+bnxt_flow_validate(struct rte_eth_dev *dev,\n+\t\t   const struct rte_flow_attr *attr,\n+\t\t   const struct rte_flow_item pattern[],\n+\t\t   const struct rte_flow_action actions[],\n+\t\t   struct rte_flow_error *error)\n+{\n+\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n+\tstruct bnxt_filter_info *filter;\n+\tint ret = 0;\n+\n+\tret = bnxt_flow_args_validate(attr, pattern, actions, error);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\n+\tfilter = bnxt_get_unused_filter(bp);\n+\tif (filter == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Not enough resources for a new flow.\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,\n+\t\t\t\t\t   error, filter);\n+\t/* No need to hold on to this filter if we are just validating flow */\n+\tfilter->fw_l2_filter_id = UINT64_MAX;\n+\tbnxt_free_filter(bp, filter);\n+\n+\treturn ret;\n+}\n+\n+static int\n+bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)\n+{\n+\tstruct bnxt_filter_info *mf;\n+\tstruct rte_flow *flow;\n+\tint i;\n+\n+\tfor (i = bp->nr_vnics - 1; i >= 0; i--) {\n+\t\tstruct bnxt_vnic_info *vnic = &bp->vnic_info[i];\n+\n+\t\tSTAILQ_FOREACH(flow, &vnic->flow_list, next) {\n+\t\t\tmf = flow->filter;\n+\n+\t\t\tif (mf->filter_type == nf->filter_type &&\n+\t\t\t    mf->flags == nf->flags &&\n+\t\t\t    mf->src_port == nf->src_port &&\n+\t\t\t    mf->src_port_mask == nf->src_port_mask &&\n+\t\t\t    mf->dst_port == nf->dst_port &&\n+\t\t\t    mf->dst_port_mask == nf->dst_port_mask &&\n+\t\t\t    mf->ip_protocol == nf->ip_protocol &&\n+\t\t\t    mf->ip_addr_type == nf->ip_addr_type &&\n+\t\t\t    mf->ethertype == nf->ethertype &&\n+\t\t\t    mf->vni == nf->vni &&\n+\t\t\t    mf->tunnel_type == nf->tunnel_type &&\n+\t\t\t    mf->l2_ovlan == nf->l2_ovlan &&\n+\t\t\t    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&\n+\t\t\t    mf->l2_ivlan == nf->l2_ivlan &&\n+\t\t\t    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&\n+\t\t\t    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,\n+\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->src_macaddr, nf->src_macaddr,\n+\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->dst_macaddr, nf->dst_macaddr,\n+\t\t\t\t    ETHER_ADDR_LEN) &&\n+\t\t\t    !memcmp(mf->src_ipaddr, nf->src_ipaddr,\n+\t\t\t\t    sizeof(nf->src_ipaddr)) &&\n+\t\t\t    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,\n+\t\t\t\t    sizeof(nf->src_ipaddr_mask)) &&\n+\t\t\t    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,\n+\t\t\t\t    sizeof(nf->dst_ipaddr)) &&\n+\t\t\t    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,\n+\t\t\t\t    sizeof(nf->dst_ipaddr_mask))) {\n+\t\t\t\tif (mf->dst_id == nf->dst_id)\n+\t\t\t\t\treturn -EEXIST;\n+\t\t\t\t/* Same Flow, Different queue\n+\t\t\t\t * Clear the old ntuple filter\n+\t\t\t\t */\n+\t\t\t\tif (nf->filter_type == HWRM_CFA_EM_FILTER)\n+\t\t\t\t\tbnxt_hwrm_clear_em_filter(bp, mf);\n+\t\t\t\tif (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)\n+\t\t\t\t\tbnxt_hwrm_clear_ntuple_filter(bp, mf);\n+\t\t\t\t/* Free the old filter, update flow\n+\t\t\t\t * with new filter\n+\t\t\t\t 
*/\n+\t\t\t\tbnxt_free_filter(bp, mf);\n+\t\t\t\tflow->filter = nf;\n+\t\t\t\treturn -EXDEV;\n+\t\t\t}\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static struct rte_flow *\n+bnxt_flow_create(struct rte_eth_dev *dev,\n+\t\t const struct rte_flow_attr *attr,\n+\t\t const struct rte_flow_item pattern[],\n+\t\t const struct rte_flow_action actions[],\n+\t\t struct rte_flow_error *error)\n+{\n+\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n+\tstruct bnxt_filter_info *filter;\n+\tstruct bnxt_vnic_info *vnic = NULL;\n+\tbool update_flow = false;\n+\tstruct rte_flow *flow;\n+\tunsigned int i;\n+\tint ret = 0;\n+\n+\tflow = rte_zmalloc(\"bnxt_flow\", sizeof(struct rte_flow), 0);\n+\tif (!flow) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Failed to allocate memory\");\n+\t\treturn flow;\n+\t}\n+\n+\tret = bnxt_flow_args_validate(attr, pattern, actions, error);\n+\tif (ret != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Not a validate flow.\\n\");\n+\t\tgoto free_flow;\n+\t}\n+\n+\tfilter = bnxt_get_unused_filter(bp);\n+\tif (filter == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Not enough resources for a new flow.\\n\");\n+\t\tgoto free_flow;\n+\t}\n+\n+\tret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,\n+\t\t\t\t\t   error, filter);\n+\tif (ret != 0)\n+\t\tgoto free_filter;\n+\n+\tret = bnxt_match_filter(bp, filter);\n+\tif (ret == -EEXIST) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Flow already exists.\\n\");\n+\t\t/* Clear the filter that was created as part of\n+\t\t * validate_and_parse_flow() above\n+\t\t */\n+\t\tbnxt_hwrm_clear_l2_filter(bp, filter);\n+\t\tgoto free_filter;\n+\t} else if (ret == -EXDEV) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Flow with same pattern exists\\n\");\n+\t\tPMD_DRV_LOG(DEBUG, \"Updating with different destination\\n\");\n+\t\tupdate_flow = true;\n+\t}\n+\n+\tif (filter->filter_type == HWRM_CFA_EM_FILTER) {\n+\t\tfilter->enables |=\n+\t\t\tHWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;\n+\t\tret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);\n+\t}\n+\n+\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {\n+\t\tfilter->enables |=\n+\t\t\tHWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;\n+\t\tret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);\n+\t}\n+\n+\tfor (i = 0; i < bp->nr_vnics; i++) {\n+\t\tvnic = &bp->vnic_info[i];\n+\t\tif (filter->dst_id == vnic->fw_vnic_id)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (!ret) {\n+\t\tflow->filter = filter;\n+\t\tflow->vnic = vnic;\n+\t\tif (update_flow) {\n+\t\t\tret = -EXDEV;\n+\t\t\tgoto free_flow;\n+\t\t}\n+\t\tPMD_DRV_LOG(ERR, \"Successfully created flow.\\n\");\n+\t\tSTAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);\n+\t\treturn flow;\n+\t}\n+free_filter:\n+\tbnxt_free_filter(bp, filter);\n+free_flow:\n+\tif (ret == -EEXIST)\n+\t\trte_flow_error_set(error, ret,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Matching Flow exists.\");\n+\telse if (ret == -EXDEV)\n+\t\trte_flow_error_set(error, ret,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Flow with pattern exists, updating destination queue\");\n+\telse\n+\t\trte_flow_error_set(error, -ret,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Failed to create flow.\");\n+\trte_free(flow);\n+\tflow = NULL;\n+\treturn flow;\n+}\n+\n+static int\n+bnxt_flow_destroy(struct rte_eth_dev *dev,\n+\t\t  struct rte_flow *flow,\n+\t\t  struct rte_flow_error *error)\n+{\n+\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n+\tstruct bnxt_filter_info *filter = flow->filter;\n+\tstruct 
bnxt_vnic_info *vnic = flow->vnic;\n+\tint ret = 0;\n+\n+\tret = bnxt_match_filter(bp, filter);\n+\tif (ret == 0)\n+\t\tPMD_DRV_LOG(ERR, \"Could not find matching flow\\n\");\n+\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n+\t\tret = bnxt_hwrm_clear_em_filter(bp, filter);\n+\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)\n+\t\tret = bnxt_hwrm_clear_ntuple_filter(bp, filter);\n+\telse\n+\t\tret = bnxt_hwrm_clear_l2_filter(bp, filter);\n+\tif (!ret) {\n+\t\tSTAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);\n+\t\trte_free(flow);\n+\t} else {\n+\t\trte_flow_error_set(error, -ret,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t   \"Failed to destroy flow.\");\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int\n+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)\n+{\n+\tstruct bnxt *bp = (struct bnxt *)dev->data->dev_private;\n+\tstruct bnxt_vnic_info *vnic;\n+\tstruct rte_flow *flow;\n+\tunsigned int i;\n+\tint ret = 0;\n+\n+\tfor (i = 0; i < bp->nr_vnics; i++) {\n+\t\tvnic = &bp->vnic_info[i];\n+\t\tSTAILQ_FOREACH(flow, &vnic->flow_list, next) {\n+\t\t\tstruct bnxt_filter_info *filter = flow->filter;\n+\n+\t\t\tif (filter->filter_type == HWRM_CFA_EM_FILTER)\n+\t\t\t\tret = bnxt_hwrm_clear_em_filter(bp, filter);\n+\t\t\tif (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)\n+\t\t\t\tret = bnxt_hwrm_clear_ntuple_filter(bp, filter);\n+\n+\t\t\tif (ret) {\n+\t\t\t\trte_flow_error_set\n+\t\t\t\t\t(error,\n+\t\t\t\t\t -ret,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t\t NULL,\n+\t\t\t\t\t \"Failed to flush flow in HW.\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tSTAILQ_REMOVE(&vnic->flow_list, flow,\n+\t\t\t\t      rte_flow, next);\n+\t\t\trte_free(flow);\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+const struct rte_flow_ops bnxt_flow_ops = {\n+\t.validate = bnxt_flow_validate,\n+\t.create = bnxt_flow_create,\n+\t.destroy = bnxt_flow_destroy,\n+\t.flush = bnxt_flow_flush,\n+};\n",
    "prefixes": [
        "v2",
        "11/23"
    ]
}