get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch.
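
For example, a read-only client can fetch this resource with a plain HTTP GET and decode the JSON body. Below is a minimal sketch using Python's requests library (an illustration, not part of the API itself); it prints fields from the response shown underneath:

import requests

# Fetch a single patch from the DPDK Patchwork instance; reads need no auth.
resp = requests.get("http://patchwork.dpdk.org/api/patches/40927/")
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[dpdk-dev,2/7] net/cxgbe: parse and validate flows"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # raw mbox URL, suitable for piping into "git am"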

GET /api/patches/40927/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40927,
    "url": "http://patchwork.dpdk.org/api/patches/40927/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/6f9b34d75940a654ee79b96918e127212d9f5239.1528469677.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<6f9b34d75940a654ee79b96918e127212d9f5239.1528469677.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/6f9b34d75940a654ee79b96918e127212d9f5239.1528469677.git.rahul.lakkireddy@chelsio.com",
    "date": "2018-06-08T17:58:12",
    "name": "[dpdk-dev,2/7] net/cxgbe: parse and validate flows",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "0621af0b965773284f42fe6d873799e4645c90a9",
    "submitter": {
        "id": 241,
        "url": "http://patchwork.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/6f9b34d75940a654ee79b96918e127212d9f5239.1528469677.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [
        {
            "id": 63,
            "url": "http://patchwork.dpdk.org/api/series/63/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=63",
            "date": "2018-06-08T17:58:10",
            "name": "cxgbe: add support to offload flows via rte_flow",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/63/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/40927/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/40927/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 85AA11CFFD;\n\tFri,  8 Jun 2018 19:59:16 +0200 (CEST)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n\tby dpdk.org (Postfix) with ESMTP id 19CF81CFF0\n\tfor <dev@dpdk.org>; Fri,  8 Jun 2018 19:59:14 +0200 (CEST)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n\tby stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id w58HxBkY017291; \n\tFri, 8 Jun 2018 10:59:12 -0700"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "shaguna@chelsio.com, kumaras@chelsio.com, indranil@chelsio.com,\n\tnirranjan@chelsio.com",
        "Date": "Fri,  8 Jun 2018 23:28:12 +0530",
        "Message-Id": "<6f9b34d75940a654ee79b96918e127212d9f5239.1528469677.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 2/7] net/cxgbe: parse and validate flows",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Shagun Agrawal <shaguna@chelsio.com>\n\nIntroduce rte_flow skeleton and implement validate operation.\n\nParse and convert <item>, <action>, <attributes> into hardware\nspecification. Perform validation, including basic sanity tests\nand underlying device's supported filter capability checks.\n\nCurrently add support for:\n<item>: IPv4, IPv6, TCP, and UDP.\n<action>: Drop, Queue, and Count.\n\nAlso add sanity checks to ensure filters are created at specified\nindex in LE-TCAM region. The index in LE-TCAM region indicates\nthe filter rule's priority with index 0 having the highest priority.\nIf no index is specified, filters are created at closest available\nfree index.\n\nSigned-off-by: Shagun Agrawal <shaguna@chelsio.com>\nSigned-off-by: Kumar Sanghvi <kumaras@chelsio.com>\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\n---\n doc/guides/nics/cxgbe.rst              |   1 +\n doc/guides/nics/features/cxgbe.ini     |   1 +\n doc/guides/rel_notes/release_18_08.rst |   5 +\n drivers/net/cxgbe/Makefile             |   2 +\n drivers/net/cxgbe/base/adapter.h       |  22 ++\n drivers/net/cxgbe/cxgbe_ethdev.c       |   2 +\n drivers/net/cxgbe/cxgbe_filter.c       |  77 ++++++\n drivers/net/cxgbe/cxgbe_filter.h       |  91 +++++++\n drivers/net/cxgbe/cxgbe_flow.c         | 473 +++++++++++++++++++++++++++++++++\n drivers/net/cxgbe/cxgbe_flow.h         |  38 +++\n 10 files changed, 712 insertions(+)\n create mode 100644 drivers/net/cxgbe/cxgbe_filter.c\n create mode 100644 drivers/net/cxgbe/cxgbe_flow.c\n create mode 100644 drivers/net/cxgbe/cxgbe_flow.h",
    "diff": "diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst\nindex 78e391473..124022cfc 100644\n--- a/doc/guides/nics/cxgbe.rst\n+++ b/doc/guides/nics/cxgbe.rst\n@@ -30,6 +30,7 @@ CXGBE and CXGBEVF PMD has support for:\n - All multicast mode\n - Port hardware statistics\n - Jumbo frames\n+- Flow API\n \n Limitations\n -----------\ndiff --git a/doc/guides/nics/features/cxgbe.ini b/doc/guides/nics/features/cxgbe.ini\nindex 6cf5c13f5..88f2f92b7 100644\n--- a/doc/guides/nics/features/cxgbe.ini\n+++ b/doc/guides/nics/features/cxgbe.ini\n@@ -16,6 +16,7 @@ Allmulticast mode    = Y\n RSS hash             = Y\n RSS key update       = Y\n Flow control         = Y\n+Flow API             = Y\n CRC offload          = Y\n VLAN offload         = Y\n L3 checksum offload  = Y\ndiff --git a/doc/guides/rel_notes/release_18_08.rst b/doc/guides/rel_notes/release_18_08.rst\nindex 5bc23c537..bc0124295 100644\n--- a/doc/guides/rel_notes/release_18_08.rst\n+++ b/doc/guides/rel_notes/release_18_08.rst\n@@ -41,6 +41,11 @@ New Features\n      Also, make sure to start the actual text at the margin.\n      =========================================================\n \n+* **Added Flow API support for CXGBE PMD.**\n+\n+  Flow API support has been added to CXGBE Poll Mode Driver to offload\n+  flows to Chelsio T5/T6 NICs.\n+\n \n API Changes\n -----------\ndiff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile\nindex 79fdb6f06..edc5d8188 100644\n--- a/drivers/net/cxgbe/Makefile\n+++ b/drivers/net/cxgbe/Makefile\n@@ -49,6 +49,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c\n SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c\n SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c\n+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c\n+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c\n SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c\n SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c\n \ndiff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h\nindex 1a0f96e40..f3434d28a 100644\n--- a/drivers/net/cxgbe/base/adapter.h\n+++ b/drivers/net/cxgbe/base/adapter.h\n@@ -312,6 +312,17 @@ struct adapter {\n \tstruct tid_info tids;     /* Info used to access TID related tables */\n };\n \n+/**\n+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev\n+ * @dev: the rte_eth_dev\n+ *\n+ * Return the struct port_info associated with a rte_eth_dev\n+ */\n+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)\n+{\n+\treturn (struct port_info *)dev->data->dev_private;\n+}\n+\n /**\n  * adap2pinfo - return the port_info of a port\n  * @adap: the adapter\n@@ -324,6 +335,17 @@ static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)\n \treturn adap->port[idx];\n }\n \n+/**\n+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev\n+ * @dev: the rte_eth_dev\n+ *\n+ * Return the struct adapter associated with a rte_eth_dev\n+ */\n+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)\n+{\n+\treturn ethdev2pinfo(dev)->adapter;\n+}\n+\n #define CXGBE_PCI_REG(reg) rte_read32(reg)\n \n static inline uint64_t cxgbe_read_addr64(volatile void *addr)\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex 32450915c..1adb8e41f 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -36,6 +36,7 @@\n \n #include \"cxgbe.h\"\n #include \"cxgbe_pfvf.h\"\n+#include 
\"cxgbe_flow.h\"\n \n /*\n  * Macros needed to support the PCI Device ID Table ...\n@@ -1036,6 +1037,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {\n \t.rx_queue_start\t\t= cxgbe_dev_rx_queue_start,\n \t.rx_queue_stop\t\t= cxgbe_dev_rx_queue_stop,\n \t.rx_queue_release\t= cxgbe_dev_rx_queue_release,\n+\t.filter_ctrl            = cxgbe_dev_filter_ctrl,\n \t.stats_get\t\t= cxgbe_dev_stats_get,\n \t.stats_reset\t\t= cxgbe_dev_stats_reset,\n \t.flow_ctrl_get\t\t= cxgbe_flow_ctrl_get,\ndiff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c\nnew file mode 100644\nindex 000000000..6b10a8be1\n--- /dev/null\n+++ b/drivers/net/cxgbe/cxgbe_filter.c\n@@ -0,0 +1,77 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2014-2018 Chelsio Communications.\n+ * All rights reserved.\n+ */\n+\n+#include \"common.h\"\n+#include \"t4_regs.h\"\n+#include \"cxgbe_filter.h\"\n+\n+/**\n+ * Validate if the requested filter specification can be set by checking\n+ * if the requested features have been enabled\n+ */\n+int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)\n+{\n+\tu32 fconf;\n+\n+\t/*\n+\t * Check for unconfigured fields being used.\n+\t */\n+\tfconf = adapter->params.tp.vlan_pri_map;\n+\n+#define S(_field) \\\n+\t(fs->val._field || fs->mask._field)\n+#define U(_mask, _field) \\\n+\t(!(fconf & (_mask)) && S(_field))\n+\n+\tif (U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))\n+\t\treturn -EOPNOTSUPP;\n+\n+#undef S\n+#undef U\n+\treturn 0;\n+}\n+\n+/**\n+ * Check if entry already filled.\n+ */\n+bool is_filter_set(struct tid_info *t, int fidx, int family)\n+{\n+\tbool result = FALSE;\n+\tint i, max;\n+\n+\t/* IPv6 requires four slots and IPv4 requires only 1 slot.\n+\t * Ensure, there's enough slots available.\n+\t */\n+\tmax = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;\n+\n+\tt4_os_lock(&t->ftid_lock);\n+\tfor (i = fidx; i <= max; i++) {\n+\t\tif (rte_bitmap_get(t->ftid_bmap, i)) {\n+\t\t\tresult = TRUE;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\tt4_os_unlock(&t->ftid_lock);\n+\treturn result;\n+}\n+\n+/**\n+ * Allocate a available free entry\n+ */\n+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)\n+{\n+\tstruct tid_info *t = &adap->tids;\n+\tint pos;\n+\tint size = t->nftids;\n+\n+\tt4_os_lock(&t->ftid_lock);\n+\tif (family == FILTER_TYPE_IPV6)\n+\t\tpos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);\n+\telse\n+\t\tpos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);\n+\tt4_os_unlock(&t->ftid_lock);\n+\n+\treturn pos < size ? pos : -1;\n+}\ndiff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h\nindex d69c79e80..a9d2d3d39 100644\n--- a/drivers/net/cxgbe/cxgbe_filter.h\n+++ b/drivers/net/cxgbe/cxgbe_filter.h\n@@ -77,21 +77,112 @@ struct ch_filter_tuple {\n  * Filter specification\n  */\n struct ch_filter_specification {\n+\t/* Administrative fields for filter. */\n+\tuint32_t hitcnts:1;\t/* count filter hits in TCB */\n+\tuint32_t prio:1;\t/* filter has priority over active/server */\n+\n+\t/*\n+\t * Fundamental filter typing.  This is the one element of filter\n+\t * matching that doesn't exist as a (value, mask) tuple.\n+\t */\n+\tuint32_t type:1;\t/* 0 => IPv4, 1 => IPv6 */\n+\n+\t/*\n+\t * Packet dispatch information.  
Ingress packets which match the\n+\t * filter rules will be dropped, passed to the host or switched back\n+\t * out as egress packets.\n+\t */\n+\tuint32_t action:2;\t/* drop, pass, switch */\n+\n+\tuint32_t dirsteer:1;\t/* 0 => RSS, 1 => steer to iq */\n+\tuint32_t iq:10;\t\t/* ingress queue */\n+\n \t/* Filter rule value/mask pairs. */\n \tstruct ch_filter_tuple val;\n \tstruct ch_filter_tuple mask;\n };\n \n+enum {\n+\tFILTER_PASS = 0,\t/* default */\n+\tFILTER_DROP\n+};\n+\n+enum filter_type {\n+\tFILTER_TYPE_IPV4 = 0,\n+\tFILTER_TYPE_IPV6,\n+};\n+\n /*\n  * Host shadow copy of ingress filter entry.  This is in host native format\n  * and doesn't match the ordering or bit order, etc. of the hardware or the\n  * firmware command.\n  */\n struct filter_entry {\n+\tstruct rte_eth_dev *dev;    /* Port's rte eth device */\n+\n \t/*\n \t * The filter itself.\n \t */\n \tstruct ch_filter_specification fs;\n };\n \n+#define FILTER_ID_MAX   (~0U)\n+\n+struct tid_info;\n+struct adapter;\n+\n+/**\n+ * Find first clear bit in the bitmap.\n+ */\n+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,\n+\t\t\t\t\t\t     unsigned int size)\n+{\n+\tunsigned int idx;\n+\n+\tfor (idx = 0; idx < size; idx++)\n+\t\tif (!rte_bitmap_get(bmap, idx))\n+\t\t\tbreak;\n+\n+\treturn idx;\n+}\n+\n+/**\n+ * Find a free region of 'num' consecutive entries.\n+ */\n+static inline unsigned int\n+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,\n+\t\t\t      unsigned int num)\n+{\n+\tunsigned int idx, j, free = 0;\n+\n+\tif (num > size)\n+\t\treturn size;\n+\n+\tfor (idx = 0; idx < size; idx += num) {\n+\t\tfor (j = 0; j < num; j++) {\n+\t\t\tif (!rte_bitmap_get(bmap, idx + j)) {\n+\t\t\t\tfree++;\n+\t\t\t} else {\n+\t\t\t\tfree = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Found the Region */\n+\t\tif (free == num)\n+\t\t\tbreak;\n+\n+\t\t/* Reached the end and still no region found */\n+\t\tif ((idx + num) > size) {\n+\t\t\tidx = size;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn idx;\n+}\n+\n+bool is_filter_set(struct tid_info *, int fidx, int family);\n+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);\n+int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);\n #endif /* _CXGBE_FILTER_H_ */\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c\nnew file mode 100644\nindex 000000000..a01708e70\n--- /dev/null\n+++ b/drivers/net/cxgbe/cxgbe_flow.c\n@@ -0,0 +1,473 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2014-2018 Chelsio Communications.\n+ * All rights reserved.\n+ */\n+#include \"common.h\"\n+#include \"cxgbe_flow.h\"\n+\n+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \\\n+do { \\\n+\tif (!((fs)->val.elem || (fs)->mask.elem)) { \\\n+\t\t(fs)->val.elem = (__v); \\\n+\t\t(fs)->mask.elem = (__m); \\\n+\t} else { \\\n+\t\treturn rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \\\n+\t\t\t\t\t  NULL, \"a filter can be specified\" \\\n+\t\t\t\t\t  \" only once\"); \\\n+\t} \\\n+} while (0)\n+\n+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \\\n+do { \\\n+\tmemcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \\\n+\tmemcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \\\n+} while (0)\n+\n+#define CXGBE_FILL_FS(v, m, elem) \\\n+\t__CXGBE_FILL_FS(v, m, fs, elem, e)\n+\n+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \\\n+\t__CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)\n+\n+static int\n+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)\n+{\n+\t/* rte_flow specification does not 
allow it. */\n+\tif (!i->spec && (i->mask ||  i->last))\n+\t\treturn rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t   i, \"last or mask given without spec\");\n+\t/*\n+\t * We don't support it.\n+\t * Although, we can support values in last as 0's or last == spec.\n+\t * But this will not provide user with any additional functionality\n+\t * and will only increase the complexity for us.\n+\t */\n+\tif (i->last)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t   i, \"last is not supported by chelsio pmd\");\n+\treturn 0;\n+}\n+\n+static int\n+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,\n+\t\t     struct ch_filter_specification *fs,\n+\t\t     struct rte_flow_error *e)\n+{\n+\tconst struct rte_flow_item_udp *val = item->spec;\n+\tconst struct rte_flow_item_udp *umask = item->mask;\n+\tconst struct rte_flow_item_udp *mask;\n+\n+\tmask = umask ? umask : (const struct rte_flow_item_udp *)dmask;\n+\n+\tif (mask->hdr.dgram_len || mask->hdr.dgram_cksum)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"udp: only src/dst port supported\");\n+\n+\tCXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);\n+\tif (!val)\n+\t\treturn 0;\n+\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n+\t\t      be16_to_cpu(mask->hdr.src_port), fport);\n+\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n+\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\treturn 0;\n+}\n+\n+static int\n+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,\n+\t\t     struct ch_filter_specification *fs,\n+\t\t     struct rte_flow_error *e)\n+{\n+\tconst struct rte_flow_item_tcp *val = item->spec;\n+\tconst struct rte_flow_item_tcp *umask = item->mask;\n+\tconst struct rte_flow_item_tcp *mask;\n+\n+\tmask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;\n+\n+\tif (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||\n+\t    mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||\n+\t    mask->hdr.tcp_urp)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"tcp: only src/dst port supported\");\n+\n+\tCXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);\n+\tif (!val)\n+\t\treturn 0;\n+\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n+\t\t      be16_to_cpu(mask->hdr.src_port), fport);\n+\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n+\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\treturn 0;\n+}\n+\n+static int\n+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,\n+\t\t      struct ch_filter_specification *fs,\n+\t\t      struct rte_flow_error *e)\n+{\n+\tconst struct rte_flow_item_ipv4 *val = item->spec;\n+\tconst struct rte_flow_item_ipv4 *umask = item->mask;\n+\tconst struct rte_flow_item_ipv4 *mask;\n+\n+\tmask = umask ? 
umask : (const struct rte_flow_item_ipv4 *)dmask;\n+\n+\tif (mask->hdr.time_to_live || mask->hdr.type_of_service)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item, \"ttl/tos are not supported\");\n+\n+\tfs->type = FILTER_TYPE_IPV4;\n+\tCXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);\n+\tif (!val)\n+\t\treturn 0; /* ipv4 wild card */\n+\n+\tCXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);\n+\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);\n+\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);\n+\n+\treturn 0;\n+}\n+\n+static int\n+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,\n+\t\t      struct ch_filter_specification *fs,\n+\t\t      struct rte_flow_error *e)\n+{\n+\tconst struct rte_flow_item_ipv6 *val = item->spec;\n+\tconst struct rte_flow_item_ipv6 *umask = item->mask;\n+\tconst struct rte_flow_item_ipv6 *mask;\n+\n+\tmask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;\n+\n+\tif (mask->hdr.vtc_flow ||\n+\t    mask->hdr.payload_len || mask->hdr.hop_limits)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"tc/flow/hop are not supported\");\n+\n+\tfs->type = FILTER_TYPE_IPV6;\n+\tCXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);\n+\tif (!val)\n+\t\treturn 0; /* ipv6 wild card */\n+\n+\tCXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);\n+\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);\n+\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);\n+\n+\treturn 0;\n+}\n+\n+static int\n+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,\n+\t\t      struct rte_flow_error *e)\n+{\n+\tif (attr->egress)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t\t  attr, \"attribute:<egress> is\"\n+\t\t\t\t\t  \" not supported !\");\n+\tif (attr->group > 0)\n+\t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t\t  attr, \"group parameter is\"\n+\t\t\t\t\t  \" not supported.\");\n+\n+\tflow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;\n+\n+\treturn 0;\n+}\n+\n+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)\n+{\n+\tstruct port_info *pi = ethdev2pinfo(dev);\n+\n+\tif (rxq > pi->n_rx_qsets)\n+\t\treturn -EINVAL;\n+\treturn 0;\n+}\n+\n+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)\n+{\n+\tstruct adapter *adap = ethdev2adap(f->dev);\n+\tstruct ch_filter_specification fs = f->fs;\n+\n+\tif (fidx >= adap->tids.nftids) {\n+\t\tdev_err(adap, \"invalid flow index %d.\\n\", fidx);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (!is_filter_set(&adap->tids, fidx, fs.type)) {\n+\t\tdev_err(adap, \"Already free fidx:%d f:%p\\n\", fidx, f);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,\n+\t\t\t struct adapter *adap, unsigned int fidx)\n+{\n+\tif (is_filter_set(&adap->tids, fidx, fs->type)) {\n+\t\tdev_err(adap, \"filter index: %d is busy.\\n\", fidx);\n+\t\treturn -EBUSY;\n+\t}\n+\tif (fidx >= adap->tids.nftids) {\n+\t\tdev_err(adap, \"filter index (%u) >= max(%u)\\n\",\n+\t\t\tfidx, adap->tids.nftids);\n+\t\treturn -ERANGE;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)\n+{\n+\treturn del ? 
cxgbe_validate_fidxondel(flow->f, fidx) :\n+\t\tcxgbe_validate_fidxonadd(&flow->fs,\n+\t\t\t\t\t ethdev2adap(flow->dev), fidx);\n+}\n+\n+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)\n+{\n+\tstruct ch_filter_specification *fs = &flow->fs;\n+\tstruct adapter *adap = ethdev2adap(flow->dev);\n+\n+\t/* For tcam get the next available slot, if default value specified */\n+\tif (flow->fidx == FILTER_ID_MAX) {\n+\t\tint idx;\n+\n+\t\tidx = cxgbe_alloc_ftid(adap, fs->type);\n+\t\tif (idx < 0) {\n+\t\t\tdev_err(adap, \"unable to get a filter index in tcam\\n\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\t*fidx = (unsigned int)idx;\n+\t} else {\n+\t\t*fidx = flow->fidx;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+cxgbe_rtef_parse_actions(struct rte_flow *flow,\n+\t\t\t const struct rte_flow_action action[],\n+\t\t\t struct rte_flow_error *e)\n+{\n+\tstruct ch_filter_specification *fs = &flow->fs;\n+\tconst struct rte_flow_action_queue *q;\n+\tconst struct rte_flow_action *a;\n+\tchar abit = 0;\n+\n+\tfor (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {\n+\t\tswitch (a->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tcontinue;\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\t\tif (abit++)\n+\t\t\t\treturn rte_flow_error_set(e, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, a,\n+\t\t\t\t\t\t\"specify only 1 pass/drop\");\n+\t\t\tfs->action = FILTER_DROP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tq = (const struct rte_flow_action_queue *)a->conf;\n+\t\t\tif (!q)\n+\t\t\t\treturn rte_flow_error_set(e, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, q,\n+\t\t\t\t\t\t\"specify rx queue index\");\n+\t\t\tif (check_rxq(flow->dev, q->index))\n+\t\t\t\treturn rte_flow_error_set(e, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, q,\n+\t\t\t\t\t\t\"Invalid rx queue\");\n+\t\t\tif (abit++)\n+\t\t\t\treturn rte_flow_error_set(e, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, a,\n+\t\t\t\t\t\t\"specify only 1 pass/drop\");\n+\t\t\tfs->action = FILTER_PASS;\n+\t\t\tfs->dirsteer = 1;\n+\t\t\tfs->iq = q->index;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\t\tfs->hitcnts = 1;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\t/* Not supported action : return error */\n+\t\t\treturn rte_flow_error_set(e, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  a, \"Action not supported\");\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct chrte_fparse parseitem[] = {\n+\t[RTE_FLOW_ITEM_TYPE_IPV4] = {\n+\t\t.fptr  = ch_rte_parsetype_ipv4,\n+\t\t.dmask = &rte_flow_item_ipv4_mask,\n+\t},\n+\n+\t[RTE_FLOW_ITEM_TYPE_IPV6] = {\n+\t\t.fptr  = ch_rte_parsetype_ipv6,\n+\t\t.dmask = &rte_flow_item_ipv6_mask,\n+\t},\n+\n+\t[RTE_FLOW_ITEM_TYPE_UDP] = {\n+\t\t.fptr  = ch_rte_parsetype_udp,\n+\t\t.dmask = &rte_flow_item_udp_mask,\n+\t},\n+\n+\t[RTE_FLOW_ITEM_TYPE_TCP] = {\n+\t\t.fptr  = ch_rte_parsetype_tcp,\n+\t\t.dmask = &rte_flow_item_tcp_mask,\n+\t},\n+};\n+\n+static int\n+cxgbe_rtef_parse_items(struct rte_flow *flow,\n+\t\t       const struct rte_flow_item items[],\n+\t\t       struct rte_flow_error *e)\n+{\n+\tconst struct rte_flow_item *i;\n+\tchar repeat[ARRAY_SIZE(parseitem)] = {0};\n+\n+\tfor (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {\n+\t\tstruct chrte_fparse *idx = &flow->item_parser[i->type];\n+\t\tint ret;\n+\n+\t\tif (i->type > ARRAY_SIZE(parseitem))\n+\t\t\treturn rte_flow_error_set(e, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  i, \"Item not supported\");\n+\n+\t\tswitch (i->type) {\n+\t\tcase 
RTE_FLOW_ITEM_TYPE_VOID:\n+\t\t\tcontinue;\n+\t\tdefault:\n+\t\t\t/* check if item is repeated */\n+\t\t\tif (repeat[i->type])\n+\t\t\t\treturn rte_flow_error_set(e, ENOTSUP,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, i,\n+\t\t\t\t\t\t\"parse items cannot be repeated (except void)\");\n+\t\t\trepeat[i->type] = 1;\n+\n+\t\t\t/* validate the item */\n+\t\t\tret = cxgbe_validate_item(i, e);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\n+\t\t\tif (!idx || !idx->fptr) {\n+\t\t\t\treturn rte_flow_error_set(e, ENOTSUP,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, i,\n+\t\t\t\t\t\t\"Item not supported\");\n+\t\t\t} else {\n+\t\t\t\tret = idx->fptr(idx->dmask, i, &flow->fs, e);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn ret;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+cxgbe_flow_parse(struct rte_flow *flow,\n+\t\t const struct rte_flow_attr *attr,\n+\t\t const struct rte_flow_item item[],\n+\t\t const struct rte_flow_action action[],\n+\t\t struct rte_flow_error *e)\n+{\n+\tint ret;\n+\n+\t/* parse user request into ch_filter_specification */\n+\tret = cxgbe_rtef_parse_attr(flow, attr, e);\n+\tif (ret)\n+\t\treturn ret;\n+\tret = cxgbe_rtef_parse_items(flow, item, e);\n+\tif (ret)\n+\t\treturn ret;\n+\treturn cxgbe_rtef_parse_actions(flow, action, e);\n+}\n+\n+static int\n+cxgbe_flow_validate(struct rte_eth_dev *dev,\n+\t\t    const struct rte_flow_attr *attr,\n+\t\t    const struct rte_flow_item item[],\n+\t\t    const struct rte_flow_action action[],\n+\t\t    struct rte_flow_error *e)\n+{\n+\tstruct adapter *adap = ethdev2adap(dev);\n+\tstruct rte_flow *flow;\n+\tunsigned int fidx;\n+\tint ret;\n+\n+\tflow = t4_os_alloc(sizeof(struct rte_flow));\n+\tif (!flow)\n+\t\treturn rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Unable to allocate memory for filter_entry\");\n+\n+\tflow->item_parser = parseitem;\n+\tflow->dev = dev;\n+\n+\tret = cxgbe_flow_parse(flow, attr, item, action, e);\n+\tif (ret) {\n+\t\tt4_os_free(flow);\n+\t\treturn ret;\n+\t}\n+\n+\tif (validate_filter(adap, &flow->fs)) {\n+\t\tt4_os_free(flow);\n+\t\treturn rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\tNULL,\n+\t\t\t\t\"validation failed. 
Check f/w config file.\");\n+\t}\n+\n+\tif (cxgbe_get_fidx(flow, &fidx)) {\n+\t\tt4_os_free(flow);\n+\t\treturn rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t\t  NULL, \"no memory in tcam.\");\n+\t}\n+\n+\tif (cxgbe_verify_fidx(flow, fidx, 0)) {\n+\t\tt4_os_free(flow);\n+\t\treturn rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t\t  NULL, \"validation failed\");\n+\t}\n+\n+\tt4_os_free(flow);\n+\treturn 0;\n+}\n+\n+static const struct rte_flow_ops cxgbe_flow_ops = {\n+\t.validate\t= cxgbe_flow_validate,\n+\t.create\t\t= NULL,\n+\t.destroy\t= NULL,\n+\t.flush\t\t= NULL,\n+\t.query\t\t= NULL,\n+\t.isolate\t= NULL,\n+};\n+\n+int\n+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,\n+\t\t      enum rte_filter_type filter_type,\n+\t\t      enum rte_filter_op filter_op,\n+\t\t      void *arg)\n+{\n+\tint ret = 0;\n+\n+\tRTE_SET_USED(dev);\n+\tswitch (filter_type) {\n+\tcase RTE_ETH_FILTER_GENERIC:\n+\t\tif (filter_op != RTE_ETH_FILTER_GET)\n+\t\t\treturn -EINVAL;\n+\t\t*(const void **)arg = &cxgbe_flow_ops;\n+\t\tbreak;\n+\tdefault:\n+\t\tret = -ENOTSUP;\n+\t\tbreak;\n+\t}\n+\treturn ret;\n+}\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h\nnew file mode 100644\nindex 000000000..45bc37082\n--- /dev/null\n+++ b/drivers/net/cxgbe/cxgbe_flow.h\n@@ -0,0 +1,38 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2014-2018 Chelsio Communications.\n+ * All rights reserved.\n+ */\n+#ifndef _CXGBE_FLOW_H_\n+#define _CXGBE_FLOW_H_\n+\n+#include <rte_flow_driver.h>\n+#include \"cxgbe_filter.h\"\n+\n+struct chrte_fparse {\n+\tint (*fptr)(const void *mask, /* currently supported mask */\n+\t\t    const struct rte_flow_item *item, /* user input */\n+\t\t    struct ch_filter_specification *fs, /* where to parse */\n+\t\t    struct rte_flow_error *e);\n+\tconst void *dmask; /* Specify what is supported by chelsio by default*/\n+};\n+\n+struct rte_flow {\n+\tstruct filter_entry *f;\n+\tstruct ch_filter_specification fs; /* temp, to create filter */\n+\tstruct chrte_fparse *item_parser;\n+\t/*\n+\t * filter_entry doesn't store user priority.\n+\t * Post creation of filter this will indicate the\n+\t * flow index (fidx) for both hash and tcam filters\n+\t */\n+\tunsigned int fidx;\n+\tstruct rte_eth_dev *dev;\n+};\n+\n+int\n+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,\n+\t\t      enum rte_filter_type filter_type,\n+\t\t      enum rte_filter_op filter_op,\n+\t\t      void *arg);\n+\n+#endif /* _CXGBE_FLOW_H_ */\n",
    "prefixes": [
        "dpdk-dev",
        "2/7"
    ]
}
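
The Allow header above also advertises PUT and PATCH. Below is a minimal sketch of a partial update with Python's requests, assuming the caller holds a Patchwork API token with maintainer rights on the project (the token value here is a placeholder, not a real credential):

import requests

# Placeholder token; write access requires maintainer credentials.
headers = {"Authorization": "Token 0123456789abcdef"}

# PATCH is a partial update: only the fields present in the payload change.
resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/40927/",
    headers=headers,
    json={"state": "accepted", "archived": True},
)
print(resp.status_code)  # 200 on success; 403 if the token lacks permission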