get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch (a full update of the writable fields).
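
A read-only request needs no authentication. Below is a minimal sketch of fetching this resource with Python's requests library (the library choice and the printed fields are illustrative, not part of Patchwork itself); it retrieves the same JSON document shown in the example response that follows:

import requests

# Patch detail endpoint, taken from the "url" field of the response below.
url = "http://patchwork.dpdk.org/api/patches/40930/"

resp = requests.get(url)     # anonymous GET; API clients receive JSON
resp.raise_for_status()
patch = resp.json()

print(patch["name"])         # "[dpdk-dev,4/7] net/cxgbe: implement flow create operation"
print(patch["state"])        # "accepted"
print(patch["check"])        # "success"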

GET /api/patches/40930/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40930,
    "url": "http://patchwork.dpdk.org/api/patches/40930/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/e5559ea7338e5051c1ca2b3d57b68aed58eab1d5.1528469677.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<e5559ea7338e5051c1ca2b3d57b68aed58eab1d5.1528469677.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/e5559ea7338e5051c1ca2b3d57b68aed58eab1d5.1528469677.git.rahul.lakkireddy@chelsio.com",
    "date": "2018-06-08T17:58:14",
    "name": "[dpdk-dev,4/7] net/cxgbe: implement flow create operation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "61a8bb0209fb41bcbf60480c34308ebfc768b13c",
    "submitter": {
        "id": 241,
        "url": "http://patchwork.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/e5559ea7338e5051c1ca2b3d57b68aed58eab1d5.1528469677.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [
        {
            "id": 63,
            "url": "http://patchwork.dpdk.org/api/series/63/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=63",
            "date": "2018-06-08T17:58:10",
            "name": "cxgbe: add support to offload flows via rte_flow",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/63/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/40930/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/40930/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7C4B81CFF9;\n\tFri,  8 Jun 2018 19:59:36 +0200 (CEST)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n\tby dpdk.org (Postfix) with ESMTP id 727D01BB35\n\tfor <dev@dpdk.org>; Fri,  8 Jun 2018 19:59:34 +0200 (CEST)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n\tby stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id w58HxLwv017297; \n\tFri, 8 Jun 2018 10:59:22 -0700"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "shaguna@chelsio.com, kumaras@chelsio.com, indranil@chelsio.com,\n\tnirranjan@chelsio.com",
        "Date": "Fri,  8 Jun 2018 23:28:14 +0530",
        "Message-Id": "<e5559ea7338e5051c1ca2b3d57b68aed58eab1d5.1528469677.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1528469677.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 4/7] net/cxgbe: implement flow create operation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Shagun Agrawal <shaguna@chelsio.com>\n\nDefine filter work request API used to construct filter operations\nto be communicated with firmware. These requests are sent via\ncontrol queue and completions come asynchronously in firmware event\nqueue.\n\nImplement flow create operation to create filters in LE-TCAM\n(maskfull) region at specified index.\n\nSigned-off-by: Shagun Agrawal <shaguna@chelsio.com>\nSigned-off-by: Kumar Sanghvi <kumaras@chelsio.com>\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\n---\n drivers/net/cxgbe/base/adapter.h        |  21 ++\n drivers/net/cxgbe/base/t4_msg.h         |  22 ++\n drivers/net/cxgbe/base/t4fw_interface.h | 145 +++++++++++++\n drivers/net/cxgbe/cxgbe.h               |   2 +\n drivers/net/cxgbe/cxgbe_filter.c        | 356 ++++++++++++++++++++++++++++++++\n drivers/net/cxgbe/cxgbe_filter.h        |  32 +++\n drivers/net/cxgbe/cxgbe_flow.c          |  82 +++++++-\n drivers/net/cxgbe/cxgbe_flow.h          |   4 +\n drivers/net/cxgbe/cxgbe_main.c          |  36 ++++\n 9 files changed, 699 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h\nindex 9a66a4a99..7f9ddae01 100644\n--- a/drivers/net/cxgbe/base/adapter.h\n+++ b/drivers/net/cxgbe/base/adapter.h\n@@ -717,6 +717,27 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry,\n \tt4_os_unlock(lock);\n }\n \n+/**\n+ * t4_init_completion - initialize completion\n+ * @c: the completion context\n+ */\n+static inline void t4_init_completion(struct t4_completion *c)\n+{\n+\tc->done = 0;\n+\tt4_os_lock_init(&c->lock);\n+}\n+\n+/**\n+ * t4_complete - set completion as done\n+ * @c: the completion context\n+ */\n+static inline void t4_complete(struct t4_completion *c)\n+{\n+\tt4_os_lock(&c->lock);\n+\tc->done = 1;\n+\tt4_os_unlock(&c->lock);\n+}\n+\n void *t4_alloc_mem(size_t size);\n void t4_free_mem(void *addr);\n #define t4_os_alloc(_size)     t4_alloc_mem((_size))\ndiff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h\nindex 74b4fc193..43d1cb66f 100644\n--- a/drivers/net/cxgbe/base/t4_msg.h\n+++ b/drivers/net/cxgbe/base/t4_msg.h\n@@ -7,6 +7,7 @@\n #define T4_MSG_H\n \n enum {\n+\tCPL_SET_TCB_RPL       = 0x3A,\n \tCPL_SGE_EGR_UPDATE    = 0xA5,\n \tCPL_FW4_MSG           = 0xC0,\n \tCPL_FW6_MSG           = 0xE0,\n@@ -25,6 +26,13 @@ union opcode_tid {\n \t__u8 opcode;\n };\n \n+#define G_TID(x)    ((x) & 0xFFFFFF)\n+\n+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)\n+\n+/* extract the TID from a CPL command */\n+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))\n+\n struct rss_header {\n \t__u8 opcode;\n #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n@@ -66,6 +74,20 @@ struct work_request_hdr {\n #define WR_HDR_SIZE 0\n #endif\n \n+#define S_COOKIE    5\n+#define M_COOKIE    0x7\n+#define V_COOKIE(x) ((x) << S_COOKIE)\n+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)\n+\n+struct cpl_set_tcb_rpl {\n+\tRSS_HDR\n+\tunion opcode_tid ot;\n+\t__be16 rsvd;\n+\t__u8   cookie;\n+\t__u8   status;\n+\t__be64 oldval;\n+};\n+\n struct cpl_tx_data {\n \tunion opcode_tid ot;\n \t__be32 len;\ndiff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h\nindex 44b6f6dac..842aa1263 100644\n--- a/drivers/net/cxgbe/base/t4fw_interface.h\n+++ b/drivers/net/cxgbe/base/t4fw_interface.h\n@@ -54,6 +54,7 @@ enum fw_memtype {\n  ********************************/\n \n enum fw_wr_opcodes {\n+\tFW_FILTER_WR\t\t= 0x02,\n \tFW_ETH_TX_PKT_WR\t= 0x08,\n \tFW_ETH_TX_PKTS_WR\t= 0x09,\n \tFW_ETH_TX_PKT_VM_WR\t= 0x11,\n@@ -143,6 +144,150 @@ struct fw_eth_tx_pkts_vm_wr {\n \t__be16 vlantci;\n };\n \n+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */\n+enum fw_filter_wr_cookie {\n+\tFW_FILTER_WR_SUCCESS,\n+\tFW_FILTER_WR_FLT_ADDED,\n+\tFW_FILTER_WR_FLT_DELETED,\n+\tFW_FILTER_WR_SMT_TBL_FULL,\n+\tFW_FILTER_WR_EINVAL,\n+};\n+\n+struct fw_filter_wr {\n+\t__be32 op_pkd;\n+\t__be32 len16_pkd;\n+\t__be64 r3;\n+\t__be32 tid_to_iq;\n+\t__be32 del_filter_to_l2tix;\n+\t__be16 ethtype;\n+\t__be16 ethtypem;\n+\t__u8   frag_to_ovlan_vldm;\n+\t__u8   smac_sel;\n+\t__be16 rx_chan_rx_rpl_iq;\n+\t__be32 maci_to_matchtypem;\n+\t__u8   ptcl;\n+\t__u8   ptclm;\n+\t__u8   ttyp;\n+\t__u8   ttypm;\n+\t__be16 ivlan;\n+\t__be16 ivlanm;\n+\t__be16 ovlan;\n+\t__be16 ovlanm;\n+\t__u8   lip[16];\n+\t__u8   lipm[16];\n+\t__u8   fip[16];\n+\t__u8   fipm[16];\n+\t__be16 lp;\n+\t__be16 lpm;\n+\t__be16 fp;\n+\t__be16 fpm;\n+\t__be16 r7;\n+\t__u8   sma[6];\n+};\n+\n+#define S_FW_FILTER_WR_TID\t12\n+#define V_FW_FILTER_WR_TID(x)\t((x) << S_FW_FILTER_WR_TID)\n+\n+#define 
S_FW_FILTER_WR_RQTYPE\t\t11\n+#define V_FW_FILTER_WR_RQTYPE(x)\t((x) << S_FW_FILTER_WR_RQTYPE)\n+\n+#define S_FW_FILTER_WR_NOREPLY\t\t10\n+#define V_FW_FILTER_WR_NOREPLY(x)\t((x) << S_FW_FILTER_WR_NOREPLY)\n+\n+#define S_FW_FILTER_WR_IQ\t0\n+#define V_FW_FILTER_WR_IQ(x)\t((x) << S_FW_FILTER_WR_IQ)\n+\n+#define S_FW_FILTER_WR_DEL_FILTER\t31\n+#define V_FW_FILTER_WR_DEL_FILTER(x)\t((x) << S_FW_FILTER_WR_DEL_FILTER)\n+#define F_FW_FILTER_WR_DEL_FILTER\tV_FW_FILTER_WR_DEL_FILTER(1U)\n+\n+#define S_FW_FILTER_WR_RPTTID\t\t25\n+#define V_FW_FILTER_WR_RPTTID(x)\t((x) << S_FW_FILTER_WR_RPTTID)\n+\n+#define S_FW_FILTER_WR_DROP\t24\n+#define V_FW_FILTER_WR_DROP(x)\t((x) << S_FW_FILTER_WR_DROP)\n+\n+#define S_FW_FILTER_WR_DIRSTEER\t\t23\n+#define V_FW_FILTER_WR_DIRSTEER(x)\t((x) << S_FW_FILTER_WR_DIRSTEER)\n+\n+#define S_FW_FILTER_WR_MASKHASH\t\t22\n+#define V_FW_FILTER_WR_MASKHASH(x)\t((x) << S_FW_FILTER_WR_MASKHASH)\n+\n+#define S_FW_FILTER_WR_DIRSTEERHASH\t21\n+#define V_FW_FILTER_WR_DIRSTEERHASH(x)\t((x) << S_FW_FILTER_WR_DIRSTEERHASH)\n+\n+#define S_FW_FILTER_WR_LPBK\t20\n+#define V_FW_FILTER_WR_LPBK(x)\t((x) << S_FW_FILTER_WR_LPBK)\n+\n+#define S_FW_FILTER_WR_DMAC\t19\n+#define V_FW_FILTER_WR_DMAC(x)\t((x) << S_FW_FILTER_WR_DMAC)\n+\n+#define S_FW_FILTER_WR_INSVLAN\t\t17\n+#define V_FW_FILTER_WR_INSVLAN(x)\t((x) << S_FW_FILTER_WR_INSVLAN)\n+\n+#define S_FW_FILTER_WR_RMVLAN\t\t16\n+#define V_FW_FILTER_WR_RMVLAN(x)\t((x) << S_FW_FILTER_WR_RMVLAN)\n+\n+#define S_FW_FILTER_WR_HITCNTS\t\t15\n+#define V_FW_FILTER_WR_HITCNTS(x)\t((x) << S_FW_FILTER_WR_HITCNTS)\n+\n+#define S_FW_FILTER_WR_TXCHAN\t\t13\n+#define V_FW_FILTER_WR_TXCHAN(x)\t((x) << S_FW_FILTER_WR_TXCHAN)\n+\n+#define S_FW_FILTER_WR_PRIO\t12\n+#define V_FW_FILTER_WR_PRIO(x)\t((x) << S_FW_FILTER_WR_PRIO)\n+\n+#define S_FW_FILTER_WR_L2TIX\t0\n+#define V_FW_FILTER_WR_L2TIX(x)\t((x) << S_FW_FILTER_WR_L2TIX)\n+\n+#define S_FW_FILTER_WR_FRAG\t7\n+#define V_FW_FILTER_WR_FRAG(x)\t((x) << S_FW_FILTER_WR_FRAG)\n+\n+#define S_FW_FILTER_WR_FRAGM\t6\n+#define V_FW_FILTER_WR_FRAGM(x)\t((x) << S_FW_FILTER_WR_FRAGM)\n+\n+#define S_FW_FILTER_WR_IVLAN_VLD\t5\n+#define V_FW_FILTER_WR_IVLAN_VLD(x)\t((x) << S_FW_FILTER_WR_IVLAN_VLD)\n+\n+#define S_FW_FILTER_WR_OVLAN_VLD\t4\n+#define V_FW_FILTER_WR_OVLAN_VLD(x)\t((x) << S_FW_FILTER_WR_OVLAN_VLD)\n+\n+#define S_FW_FILTER_WR_IVLAN_VLDM\t3\n+#define V_FW_FILTER_WR_IVLAN_VLDM(x)\t((x) << S_FW_FILTER_WR_IVLAN_VLDM)\n+\n+#define S_FW_FILTER_WR_OVLAN_VLDM\t2\n+#define V_FW_FILTER_WR_OVLAN_VLDM(x)\t((x) << S_FW_FILTER_WR_OVLAN_VLDM)\n+\n+#define S_FW_FILTER_WR_RX_CHAN\t\t15\n+#define V_FW_FILTER_WR_RX_CHAN(x)\t((x) << S_FW_FILTER_WR_RX_CHAN)\n+\n+#define S_FW_FILTER_WR_RX_RPL_IQ\t0\n+#define V_FW_FILTER_WR_RX_RPL_IQ(x)\t((x) << S_FW_FILTER_WR_RX_RPL_IQ)\n+\n+#define S_FW_FILTER_WR_MACI\t23\n+#define V_FW_FILTER_WR_MACI(x)\t((x) << S_FW_FILTER_WR_MACI)\n+\n+#define S_FW_FILTER_WR_MACIM\t14\n+#define V_FW_FILTER_WR_MACIM(x)\t((x) << S_FW_FILTER_WR_MACIM)\n+\n+#define S_FW_FILTER_WR_FCOE\t13\n+#define V_FW_FILTER_WR_FCOE(x)\t((x) << S_FW_FILTER_WR_FCOE)\n+\n+#define S_FW_FILTER_WR_FCOEM\t12\n+#define V_FW_FILTER_WR_FCOEM(x)\t((x) << S_FW_FILTER_WR_FCOEM)\n+\n+#define S_FW_FILTER_WR_PORT\t9\n+#define V_FW_FILTER_WR_PORT(x)\t((x) << S_FW_FILTER_WR_PORT)\n+\n+#define S_FW_FILTER_WR_PORTM\t6\n+#define V_FW_FILTER_WR_PORTM(x)\t((x) << S_FW_FILTER_WR_PORTM)\n+\n+#define S_FW_FILTER_WR_MATCHTYPE\t3\n+#define V_FW_FILTER_WR_MATCHTYPE(x)\t((x) << S_FW_FILTER_WR_MATCHTYPE)\n+\n+#define S_FW_FILTER_WR_MATCHTYPEM\t0\n+#define 
V_FW_FILTER_WR_MATCHTYPEM(x)\t((x) << S_FW_FILTER_WR_MATCHTYPEM)\n+\n /******************************************************************************\n  *  C O M M A N D s\n  *********************/\ndiff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h\nindex 44f5934d1..27d6e2b84 100644\n--- a/drivers/net/cxgbe/cxgbe.h\n+++ b/drivers/net/cxgbe/cxgbe.h\n@@ -38,6 +38,8 @@ void cxgbe_close(struct adapter *adapter);\n void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);\n void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);\n void cxgbe_stats_reset(struct port_info *pi);\n+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,\n+\t\t\t      unsigned int cnt, struct t4_completion *c);\n int link_start(struct port_info *pi);\n void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,\n \t       unsigned int cnt, unsigned int size, unsigned int iqe_size);\ndiff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c\nindex 6b10a8be1..cf83ec9c0 100644\n--- a/drivers/net/cxgbe/cxgbe_filter.c\n+++ b/drivers/net/cxgbe/cxgbe_filter.c\n@@ -33,6 +33,50 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)\n \treturn 0;\n }\n \n+/**\n+ * Get the queue to which the traffic must be steered to.\n+ */\n+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,\n+\t\t\t\t      struct ch_filter_specification *fs)\n+{\n+\tstruct port_info *pi = ethdev2pinfo(dev);\n+\tstruct adapter *adapter = pi->adapter;\n+\tunsigned int iq;\n+\n+\t/*\n+\t * If the user has requested steering matching Ingress Packets\n+\t * to a specific Queue Set, we need to make sure it's in range\n+\t * for the port and map that into the Absolute Queue ID of the\n+\t * Queue Set's Response Queue.\n+\t */\n+\tif (!fs->dirsteer) {\n+\t\tiq = 0;\n+\t} else {\n+\t\t/*\n+\t\t * If the iq id is greater than the number of qsets,\n+\t\t * then assume it is an absolute qid.\n+\t\t */\n+\t\tif (fs->iq < pi->n_rx_qsets)\n+\t\t\tiq = adapter->sge.ethrxq[pi->first_qset +\n+\t\t\t\t\t\t fs->iq].rspq.abs_id;\n+\t\telse\n+\t\t\tiq = fs->iq;\n+\t}\n+\n+\treturn iq;\n+}\n+\n+/* Return an error number if the indicated filter isn't writable ... */\n+int writable_filter(struct filter_entry *f)\n+{\n+\tif (f->locked)\n+\t\treturn -EPERM;\n+\tif (f->pending)\n+\t\treturn -EBUSY;\n+\n+\treturn 0;\n+}\n+\n /**\n  * Check if entry already filled.\n  */\n@@ -75,3 +119,315 @@ int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)\n \n \treturn pos < size ? pos : -1;\n }\n+\n+/**\n+ * Clear a filter and release any of its resources that we own.  This also\n+ * clears the filter's \"pending\" status.\n+ */\n+void clear_filter(struct filter_entry *f)\n+{\n+\t/*\n+\t * The zeroing of the filter rule below clears the filter valid,\n+\t * pending, locked flags etc. 
so it's all we need for\n+\t * this operation.\n+\t */\n+\tmemset(f, 0, sizeof(*f));\n+}\n+\n+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)\n+{\n+\tstruct adapter *adapter = ethdev2adap(dev);\n+\tstruct filter_entry *f = &adapter->tids.ftid_tab[fidx];\n+\tstruct rte_mbuf *mbuf;\n+\tstruct fw_filter_wr *fwr;\n+\tstruct sge_ctrl_txq *ctrlq;\n+\tunsigned int port_id = ethdev2pinfo(dev)->port_id;\n+\tint ret;\n+\n+\tctrlq = &adapter->sge.ctrlq[port_id];\n+\tmbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);\n+\tif (!mbuf) {\n+\t\tret = -ENOMEM;\n+\t\tgoto out;\n+\t}\n+\n+\tmbuf->data_len = sizeof(*fwr);\n+\tmbuf->pkt_len = mbuf->data_len;\n+\n+\tfwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);\n+\tmemset(fwr, 0, sizeof(*fwr));\n+\n+\t/*\n+\t * Construct the work request to set the filter.\n+\t */\n+\tfwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));\n+\tfwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));\n+\tfwr->tid_to_iq =\n+\t\tcpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |\n+\t\t\t    V_FW_FILTER_WR_RQTYPE(f->fs.type) |\n+\t\t\t    V_FW_FILTER_WR_NOREPLY(0) |\n+\t\t\t    V_FW_FILTER_WR_IQ(f->fs.iq));\n+\tfwr->del_filter_to_l2tix =\n+\t\tcpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |\n+\t\t\t    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |\n+\t\t\t    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |\n+\t\t\t    V_FW_FILTER_WR_PRIO(f->fs.prio));\n+\tfwr->ethtype = cpu_to_be16(f->fs.val.ethtype);\n+\tfwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);\n+\tfwr->smac_sel = 0;\n+\tfwr->rx_chan_rx_rpl_iq =\n+\t\tcpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |\n+\t\t\t    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id\n+\t\t\t\t\t\t     ));\n+\tfwr->ptcl = f->fs.val.proto;\n+\tfwr->ptclm = f->fs.mask.proto;\n+\trte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));\n+\trte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));\n+\trte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));\n+\trte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));\n+\tfwr->lp = cpu_to_be16(f->fs.val.lport);\n+\tfwr->lpm = cpu_to_be16(f->fs.mask.lport);\n+\tfwr->fp = cpu_to_be16(f->fs.val.fport);\n+\tfwr->fpm = cpu_to_be16(f->fs.mask.fport);\n+\n+\t/*\n+\t * Mark the filter as \"pending\" and ship off the Filter Work Request.\n+\t * When we get the Work Request Reply we'll clear the pending status.\n+\t */\n+\tf->pending = 1;\n+\tt4_mgmt_tx(ctrlq, mbuf);\n+\treturn 0;\n+\n+out:\n+\treturn ret;\n+}\n+\n+/**\n+ * Set the corresponding entry in the bitmap. 4 slots are\n+ * marked for IPv6, whereas only 1 slot is marked for IPv4.\n+ */\n+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)\n+{\n+\tt4_os_lock(&t->ftid_lock);\n+\tif (rte_bitmap_get(t->ftid_bmap, fidx)) {\n+\t\tt4_os_unlock(&t->ftid_lock);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tif (family == FILTER_TYPE_IPV4) {\n+\t\trte_bitmap_set(t->ftid_bmap, fidx);\n+\t} else {\n+\t\trte_bitmap_set(t->ftid_bmap, fidx);\n+\t\trte_bitmap_set(t->ftid_bmap, fidx + 1);\n+\t\trte_bitmap_set(t->ftid_bmap, fidx + 2);\n+\t\trte_bitmap_set(t->ftid_bmap, fidx + 3);\n+\t}\n+\tt4_os_unlock(&t->ftid_lock);\n+\treturn 0;\n+}\n+\n+/**\n+ * Clear the corresponding entry in the bitmap. 
4 slots are\n+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.\n+ */\n+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)\n+{\n+\tt4_os_lock(&t->ftid_lock);\n+\tif (family == FILTER_TYPE_IPV4) {\n+\t\trte_bitmap_clear(t->ftid_bmap, fidx);\n+\t} else {\n+\t\trte_bitmap_clear(t->ftid_bmap, fidx);\n+\t\trte_bitmap_clear(t->ftid_bmap, fidx + 1);\n+\t\trte_bitmap_clear(t->ftid_bmap, fidx + 2);\n+\t\trte_bitmap_clear(t->ftid_bmap, fidx + 3);\n+\t}\n+\tt4_os_unlock(&t->ftid_lock);\n+}\n+\n+/**\n+ * Check a Chelsio Filter Request for validity, convert it into our internal\n+ * format and send it to the hardware.  Return 0 on success, an error number\n+ * otherwise.  We attach any provided filter operation context to the internal\n+ * filter specification in order to facilitate signaling completion of the\n+ * operation.\n+ */\n+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,\n+\t\t     struct ch_filter_specification *fs,\n+\t\t     struct filter_ctx *ctx)\n+{\n+\tstruct port_info *pi = ethdev2pinfo(dev);\n+\tstruct adapter *adapter = pi->adapter;\n+\tunsigned int fidx, iq, fid_bit = 0;\n+\tstruct filter_entry *f;\n+\tint ret;\n+\n+\tif (filter_id >= adapter->tids.nftids)\n+\t\treturn -ERANGE;\n+\n+\tret = validate_filter(adapter, fs);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Ensure filter id is aligned on the 4 slot boundary for IPv6\n+\t * maskfull filters.\n+\t */\n+\tif (fs->type)\n+\t\tfilter_id &= ~(0x3);\n+\n+\tret = is_filter_set(&adapter->tids, filter_id, fs->type);\n+\tif (ret)\n+\t\treturn -EBUSY;\n+\n+\tiq = get_filter_steerq(dev, fs);\n+\n+\t/*\n+\t * IPv6 filters occupy four slots and must be aligned on\n+\t * four-slot boundaries.  IPv4 filters only occupy a single\n+\t * slot and have no alignment requirements but writing a new\n+\t * IPv4 filter into the middle of an existing IPv6 filter\n+\t * requires clearing the old IPv6 filter.\n+\t */\n+\tif (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */\n+\t\t/*\n+\t\t * If our IPv4 filter isn't being written to a\n+\t\t * multiple of four filter index and there's an IPv6\n+\t\t * filter at the multiple of 4 base slot, then we need\n+\t\t * to delete that IPv6 filter ...\n+\t\t */\n+\t\tfidx = filter_id & ~0x3;\n+\t\tif (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {\n+\t\t\tf = &adapter->tids.ftid_tab[fidx];\n+\t\t\tif (f->valid)\n+\t\t\t\treturn -EBUSY;\n+\t\t}\n+\t} else { /* IPv6 */\n+\t\t/*\n+\t\t * Ensure that the IPv6 filter is aligned on a\n+\t\t * multiple of 4 boundary.\n+\t\t */\n+\t\tif (filter_id & 0x3)\n+\t\t\treturn -EINVAL;\n+\n+\t\t/*\n+\t\t * Check all except the base overlapping IPv4 filter\n+\t\t * slots.\n+\t\t */\n+\t\tfor (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {\n+\t\t\tf = &adapter->tids.ftid_tab[fidx];\n+\t\t\tif (f->valid)\n+\t\t\t\treturn -EBUSY;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Check to make sure that provided filter index is not\n+\t * already in use by someone else\n+\t */\n+\tf = &adapter->tids.ftid_tab[filter_id];\n+\tif (f->valid)\n+\t\treturn -EBUSY;\n+\n+\tfidx = adapter->tids.ftid_base + filter_id;\n+\tfid_bit = filter_id;\n+\tret = cxgbe_set_ftid(&adapter->tids, fid_bit,\n+\t\t\t     fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*\n+\t * Check to make sure the filter requested is writable ...\n+\t */\n+\tret = writable_filter(f);\n+\tif (ret) {\n+\t\t/* Clear the bits we have set above */\n+\t\tcxgbe_clear_ftid(&adapter->tids, fid_bit,\n+\t\t\t\t fs->type ? 
FILTER_TYPE_IPV6 :\n+\t\t\t\t\t    FILTER_TYPE_IPV4);\n+\t\treturn ret;\n+\t}\n+\n+\t/*\n+\t * Convert the filter specification into our internal format.\n+\t * We copy the PF/VF specification into the Outer VLAN field\n+\t * here so the rest of the code -- including the interface to\n+\t * the firmware -- doesn't have to constantly do these checks.\n+\t */\n+\tf->fs = *fs;\n+\tf->fs.iq = iq;\n+\tf->dev = dev;\n+\n+\t/*\n+\t * Attempt to set the filter.  If we don't succeed, we clear\n+\t * it and return the failure.\n+\t */\n+\tf->ctx = ctx;\n+\tf->tid = fidx; /* Save the actual tid */\n+\tret = set_filter_wr(dev, filter_id);\n+\tif (ret) {\n+\t\tfid_bit = f->tid - adapter->tids.ftid_base;\n+\t\tcxgbe_clear_ftid(&adapter->tids, fid_bit,\n+\t\t\t\t fs->type ? FILTER_TYPE_IPV6 :\n+\t\t\t\t\t    FILTER_TYPE_IPV4);\n+\t\tclear_filter(f);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Handle a LE-TCAM filter write/deletion reply.\n+ */\n+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)\n+{\n+\tstruct filter_entry *f = NULL;\n+\tunsigned int tid = GET_TID(rpl);\n+\tint idx, max_fidx = adap->tids.nftids;\n+\n+\t/* Get the corresponding filter entry for this tid */\n+\tif (adap->tids.ftid_tab) {\n+\t\t/* Check this in normal filter region */\n+\t\tidx = tid - adap->tids.ftid_base;\n+\t\tif (idx >= max_fidx)\n+\t\t\treturn;\n+\n+\t\tf = &adap->tids.ftid_tab[idx];\n+\t\tif (f->tid != tid)\n+\t\t\treturn;\n+\t}\n+\n+\t/* We found the filter entry for this tid */\n+\tif (f) {\n+\t\tunsigned int ret = G_COOKIE(rpl->cookie);\n+\t\tstruct filter_ctx *ctx;\n+\n+\t\t/*\n+\t\t * Pull off any filter operation context attached to the\n+\t\t * filter.\n+\t\t */\n+\t\tctx = f->ctx;\n+\t\tf->ctx = NULL;\n+\n+\t\tif (ret == FW_FILTER_WR_FLT_ADDED) {\n+\t\t\tf->pending = 0;  /* asynchronous setup completed */\n+\t\t\tf->valid = 1;\n+\t\t\tif (ctx) {\n+\t\t\t\tctx->tid = f->tid;\n+\t\t\t\tctx->result = 0;\n+\t\t\t}\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * Something went wrong.  Issue a warning about the\n+\t\t\t * problem and clear everything out.\n+\t\t\t */\n+\t\t\tdev_warn(adap, \"filter %u setup failed with error %u\\n\",\n+\t\t\t\t idx, ret);\n+\t\t\tclear_filter(f);\n+\t\t\tif (ctx)\n+\t\t\t\tctx->result = -EINVAL;\n+\t\t}\n+\n+\t\tif (ctx)\n+\t\t\tt4_complete(&ctx->completion);\n+\t}\n+}\ndiff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h\nindex a9d2d3d39..e12baa7f9 100644\n--- a/drivers/net/cxgbe/cxgbe_filter.h\n+++ b/drivers/net/cxgbe/cxgbe_filter.h\n@@ -112,14 +112,39 @@ enum filter_type {\n \tFILTER_TYPE_IPV6,\n };\n \n+struct t4_completion {\n+\tunsigned int done;       /* completion done (0 - No, 1 - Yes) */\n+\trte_spinlock_t lock;     /* completion lock */\n+};\n+\n+/*\n+ * Filter operation context to allow callers to wait for\n+ * an asynchronous completion.\n+ */\n+struct filter_ctx {\n+\tstruct t4_completion completion; /* completion rendezvous */\n+\tint result;                      /* result of operation */\n+\tu32 tid;                         /* to store tid of hash filter */\n+};\n+\n /*\n  * Host shadow copy of ingress filter entry.  This is in host native format\n  * and doesn't match the ordering or bit order, etc. 
of the hardware or the\n  * firmware command.\n  */\n struct filter_entry {\n+\t/*\n+\t * Administrative fields for filter.\n+\t */\n+\tu32 valid:1;                /* filter allocated and valid */\n+\tu32 locked:1;               /* filter is administratively locked */\n+\tu32 pending:1;              /* filter action is pending FW reply */\n+\tstruct filter_ctx *ctx;     /* caller's completion hook */\n \tstruct rte_eth_dev *dev;    /* Port's rte eth device */\n \n+\t/* This will store the actual tid */\n+\tu32 tid;\n+\n \t/*\n \t * The filter itself.\n \t */\n@@ -183,6 +208,13 @@ cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,\n }\n \n bool is_filter_set(struct tid_info *, int fidx, int family);\n+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);\n+void clear_filter(struct filter_entry *f);\n+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);\n+int writable_filter(struct filter_entry *f);\n+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,\n+\t\t     struct ch_filter_specification *fs,\n+\t\t     struct filter_ctx *ctx);\n int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);\n int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);\n #endif /* _CXGBE_FILTER_H_ */\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c\nindex a01708e70..7fa3f5810 100644\n--- a/drivers/net/cxgbe/cxgbe_flow.c\n+++ b/drivers/net/cxgbe/cxgbe_flow.c\n@@ -391,6 +391,86 @@ cxgbe_flow_parse(struct rte_flow *flow,\n \treturn cxgbe_rtef_parse_actions(flow, action, e);\n }\n \n+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)\n+{\n+\tstruct ch_filter_specification *fs = &flow->fs;\n+\tstruct adapter *adap = ethdev2adap(dev);\n+\tstruct filter_ctx ctx;\n+\tunsigned int fidx;\n+\tint err;\n+\n+\tif (cxgbe_get_fidx(flow, &fidx))\n+\t\treturn -ENOMEM;\n+\tif (cxgbe_verify_fidx(flow, fidx, 0))\n+\t\treturn -1;\n+\n+\tt4_init_completion(&ctx.completion);\n+\t/* go create the filter */\n+\terr = cxgbe_set_filter(dev, fidx, fs, &ctx);\n+\tif (err) {\n+\t\tdev_err(adap, \"Error %d while creating filter.\\n\", err);\n+\t\treturn err;\n+\t}\n+\n+\t/* Poll the FW for reply */\n+\terr = cxgbe_poll_for_completion(&adap->sge.fw_evtq,\n+\t\t\t\t\tCXGBE_FLOW_POLL_US,\n+\t\t\t\t\tCXGBE_FLOW_POLL_CNT,\n+\t\t\t\t\t&ctx.completion);\n+\tif (err) {\n+\t\tdev_err(adap, \"Filter set operation timed out (%d)\\n\", err);\n+\t\treturn err;\n+\t}\n+\tif (ctx.result) {\n+\t\tdev_err(adap, \"Hardware error %d while creating the filter.\\n\",\n+\t\t\tctx.result);\n+\t\treturn ctx.result;\n+\t}\n+\n+\tflow->fidx = fidx;\n+\tflow->f = &adap->tids.ftid_tab[fidx];\n+\n+\treturn 0;\n+}\n+\n+static struct rte_flow *\n+cxgbe_flow_create(struct rte_eth_dev *dev,\n+\t\t  const struct rte_flow_attr *attr,\n+\t\t  const struct rte_flow_item item[],\n+\t\t  const struct rte_flow_action action[],\n+\t\t  struct rte_flow_error *e)\n+{\n+\tstruct rte_flow *flow;\n+\tint ret;\n+\n+\tflow = t4_os_alloc(sizeof(struct rte_flow));\n+\tif (!flow) {\n+\t\trte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"Unable to allocate memory for\"\n+\t\t\t\t   \" filter_entry\");\n+\t\treturn NULL;\n+\t}\n+\n+\tflow->item_parser = parseitem;\n+\tflow->dev = dev;\n+\n+\tif (cxgbe_flow_parse(flow, attr, item, action, e)) {\n+\t\tt4_os_free(flow);\n+\t\treturn NULL;\n+\t}\n+\n+\t/* go, interact with cxgbe_filter */\n+\tret = __cxgbe_flow_create(dev, flow);\n+\tif (ret) 
{\n+\t\trte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"Unable to create flow rule\");\n+\t\tt4_os_free(flow);\n+\t\treturn NULL;\n+\t}\n+\n+\treturn flow;\n+}\n+\n static int\n cxgbe_flow_validate(struct rte_eth_dev *dev,\n \t\t    const struct rte_flow_attr *attr,\n@@ -443,7 +523,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,\n \n static const struct rte_flow_ops cxgbe_flow_ops = {\n \t.validate\t= cxgbe_flow_validate,\n-\t.create\t\t= NULL,\n+\t.create\t\t= cxgbe_flow_create,\n \t.destroy\t= NULL,\n \t.flush\t\t= NULL,\n \t.query\t\t= NULL,\ndiff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h\nindex 45bc37082..4456376aa 100644\n--- a/drivers/net/cxgbe/cxgbe_flow.h\n+++ b/drivers/net/cxgbe/cxgbe_flow.h\n@@ -7,6 +7,10 @@\n \n #include <rte_flow_driver.h>\n #include \"cxgbe_filter.h\"\n+#include \"cxgbe.h\"\n+\n+#define CXGBE_FLOW_POLL_US  10\n+#define CXGBE_FLOW_POLL_CNT 10\n \n struct chrte_fparse {\n \tint (*fptr)(const void *mask, /* currently supported mask */\ndiff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c\nindex 5416800de..a00e0700d 100644\n--- a/drivers/net/cxgbe/cxgbe_main.c\n+++ b/drivers/net/cxgbe/cxgbe_main.c\n@@ -86,6 +86,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,\n \t\tconst struct cpl_fw6_msg *msg = (const void *)rsp;\n \n \t\tt4_handle_fw_rpl(q->adapter, msg->data);\n+\t} else if (opcode == CPL_SET_TCB_RPL) {\n+\t\tconst struct cpl_set_tcb_rpl *p = (const void *)rsp;\n+\n+\t\tfilter_rpl(q->adapter, p);\n \t} else {\n \t\tdev_err(adapter, \"unexpected CPL %#x on FW event queue\\n\",\n \t\t\topcode);\n@@ -135,6 +139,38 @@ int setup_sge_ctrl_txq(struct adapter *adapter)\n \treturn err;\n }\n \n+/**\n+ * cxgbe_poll_for_completion: Poll rxq for completion\n+ * @q: rxq to poll\n+ * @us: microseconds to delay\n+ * @cnt: number of times to poll\n+ * @c: completion to check for 'done' status\n+ *\n+ * Polls the rxq for reples until completion is done or the count\n+ * expires.\n+ */\n+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,\n+\t\t\t      unsigned int cnt, struct t4_completion *c)\n+{\n+\tunsigned int i;\n+\tunsigned int work_done, budget = 4;\n+\n+\tif (!c)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < cnt; i++) {\n+\t\tcxgbe_poll(q, NULL, budget, &work_done);\n+\t\tt4_os_lock(&c->lock);\n+\t\tif (c->done) {\n+\t\t\tt4_os_unlock(&c->lock);\n+\t\t\treturn 0;\n+\t\t}\n+\t\tt4_os_unlock(&c->lock);\n+\t\tudelay(us);\n+\t}\n+\treturn -ETIMEDOUT;\n+}\n+\n int setup_sge_fwevtq(struct adapter *adapter)\n {\n \tstruct sge *s = &adapter->sge;\n",
    "prefixes": [
        "dpdk-dev",
        "4/7"
    ]
}
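
The Allow header above lists PUT and PATCH, so the same resource can also be modified in place. The following is a hedged sketch of a partial update via PATCH, assuming an API token with sufficient (project maintainer) permissions; the token placeholder and the choice of fields to write ("state", "archived") are assumptions for illustration, not taken from the dump:

import requests

url = "http://patchwork.dpdk.org/api/patches/40930/"
headers = {"Authorization": "Token <your-api-token>"}   # placeholder, not a real token

# PATCH changes only the supplied fields; unlisted fields keep their values.
resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": True})
resp.raise_for_status()

updated = resp.json()
print(updated["state"], updated["archived"])   # expected: accepted True

A PUT request to the same URL would perform a full update of the writable fields instead of a partial one.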