get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/89927/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 89927,
    "url": "http://patchwork.dpdk.org/api/patches/89927/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210326140850.7332-5-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210326140850.7332-5-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210326140850.7332-5-pbhagavatula@marvell.com",
    "date": "2021-03-26T14:08:45",
    "name": "[v8,4/8] eventdev: add Rx adapter event vector support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b85414e172b7ae4f5559bf19a2026a9222d3ff3c",
    "submitter": {
        "id": 1183,
        "url": "http://patchwork.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patchwork.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210326140850.7332-5-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 15910,
            "url": "http://patchwork.dpdk.org/api/series/15910/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=15910",
            "date": "2021-03-26T14:08:41",
            "name": "Introduce event vectorization",
            "version": 8,
            "mbox": "http://patchwork.dpdk.org/series/15910/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/89927/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/89927/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3A2CEA0A02;\n\tFri, 26 Mar 2021 15:10:42 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 820A7140EB2;\n\tFri, 26 Mar 2021 15:10:36 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 1C4BF140EB1\n for <dev@dpdk.org>; Fri, 26 Mar 2021 15:10:35 +0100 (CET)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 12QEAXtj006849; Fri, 26 Mar 2021 07:10:34 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com with ESMTP id 37h11pjqrp-2\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Fri, 26 Mar 2021 07:10:34 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Fri, 26 Mar 2021 07:10:30 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Fri, 26 Mar 2021 07:10:30 -0700",
            "from BG-LT7430.marvell.com (unknown [10.193.68.121])\n by maili.marvell.com (Postfix) with ESMTP id 4C9593F7085;\n Fri, 26 Mar 2021 07:10:25 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=eZ32yMYwY18Xx+uyQDJRAIxC7PkXy0E6Rp7/0kxectU=;\n b=MPgpd+DW1qew0lP80T7vHIvEkiR5gdIYzQxYdG1bsbuxFwhfFns0+dsFpq0u+ch98Svg\n uS2fgek+XnUGoLAh/iiBogyPCmgzYgjMsWKYGrcSrZREM0sZh5L378V1AuR8EaL99l0G\n YB2jQs7ExCY1h9ZSkrbDPSsxefCN6mmNtaGdTwXfdZYWdMAY+ZGpCIHPiKj8t+GDq8qT\n /ApHLVUgixVkaxOeCz3AIMM0G2Ar/XKX3Y8GGarFkMAc+CTPctrODqE6a95cDMaFsG3o\n hyeksUr8neIGFJDXW75DKiZ3s1+6si0bpd/pQX4RlF2UNB2I3N3DUSOfrm8Q1PUQqN8c vQ==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>, <jay.jayatheerthan@intel.com>,\n <erik.g.carrillo@intel.com>, <abhinandan.gujjar@intel.com>,\n <timothy.mcdaniel@intel.com>, <hemant.agrawal@nxp.com>,\n <harry.van.haaren@intel.com>, <mattias.ronnblom@ericsson.com>,\n <liang.j.ma@intel.com>",
        "CC": "<dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>",
        "Date": "Fri, 26 Mar 2021 19:38:45 +0530",
        "Message-ID": "<20210326140850.7332-5-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210326140850.7332-1-pbhagavatula@marvell.com>",
        "References": "<20210325171057.6699-1-pbhagavatula@marvell.com>\n <20210326140850.7332-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "iK5X1xcyIHq9cLZ2L0gwzKZV4GtzrH_x",
        "X-Proofpoint-GUID": "iK5X1xcyIHq9cLZ2L0gwzKZV4GtzrH_x",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.369, 18.0.761\n definitions=2021-03-26_06:2021-03-26,\n 2021-03-26 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v8 4/8] eventdev: add Rx adapter event vector\n support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd event vector support for event eth Rx adapter, the implementation\ncreates vector flows based on port and queue identifier of the received\nmbufs.\nThe flow id for SW Rx event vectorization will use 12-bits of queue\nidentifier and 8-bits port identifier when custom flow id is not set\nfor simplicity.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\n---\n lib/librte_eventdev/eventdev_pmd.h            |   7 +-\n .../rte_event_eth_rx_adapter.c                | 269 ++++++++++++++++--\n lib/librte_eventdev/rte_eventdev.c            |   6 +-\n 3 files changed, 258 insertions(+), 24 deletions(-)",
    "diff": "diff --git a/lib/librte_eventdev/eventdev_pmd.h b/lib/librte_eventdev/eventdev_pmd.h\nindex 9297f1433..0f724ac85 100644\n--- a/lib/librte_eventdev/eventdev_pmd.h\n+++ b/lib/librte_eventdev/eventdev_pmd.h\n@@ -69,9 +69,10 @@ extern \"C\" {\n \t} \\\n } while (0)\n \n-#define RTE_EVENT_ETH_RX_ADAPTER_SW_CAP \\\n-\t\t((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \\\n-\t\t\t(RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))\n+#define RTE_EVENT_ETH_RX_ADAPTER_SW_CAP                                        \\\n+\t((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) |                     \\\n+\t (RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) |                         \\\n+\t (RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR))\n \n #define RTE_EVENT_CRYPTO_ADAPTER_SW_CAP \\\n \t\tRTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA\ndiff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\nindex ac8ba5bf0..fba3b5ec3 100644\n--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c\n@@ -26,6 +26,10 @@\n #define BATCH_SIZE\t\t32\n #define BLOCK_CNT_THRESHOLD\t10\n #define ETH_EVENT_BUFFER_SIZE\t(4*BATCH_SIZE)\n+#define MAX_VECTOR_SIZE\t\t1024\n+#define MIN_VECTOR_SIZE\t\t4\n+#define MAX_VECTOR_NS\t\t1E9\n+#define MIN_VECTOR_NS\t\t1E5\n \n #define ETH_RX_ADAPTER_SERVICE_NAME_LEN\t32\n #define ETH_RX_ADAPTER_MEM_NAME_LEN\t32\n@@ -59,6 +63,20 @@ struct eth_rx_poll_entry {\n \tuint16_t eth_rx_qid;\n };\n \n+struct eth_rx_vector_data {\n+\tTAILQ_ENTRY(eth_rx_vector_data) next;\n+\tuint16_t port;\n+\tuint16_t queue;\n+\tuint16_t max_vector_count;\n+\tuint64_t event;\n+\tuint64_t ts;\n+\tuint64_t vector_timeout_ticks;\n+\tstruct rte_mempool *vector_pool;\n+\tstruct rte_event_vector *vector_ev;\n+} __rte_cache_aligned;\n+\n+TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);\n+\n /* Instance per adapter */\n struct rte_eth_event_enqueue_buffer {\n \t/* Count of events in this buffer 
*/\n@@ -92,6 +110,14 @@ struct rte_event_eth_rx_adapter {\n \tuint32_t wrr_pos;\n \t/* Event burst buffer */\n \tstruct rte_eth_event_enqueue_buffer event_enqueue_buffer;\n+\t/* Vector enable flag */\n+\tuint8_t ena_vector;\n+\t/* Timestamp of previous vector expiry list traversal */\n+\tuint64_t prev_expiry_ts;\n+\t/* Minimum ticks to wait before traversing expiry list */\n+\tuint64_t vector_tmo_ticks;\n+\t/* vector list */\n+\tstruct eth_rx_vector_data_list vector_list;\n \t/* Per adapter stats */\n \tstruct rte_event_eth_rx_adapter_stats stats;\n \t/* Block count, counts up to BLOCK_CNT_THRESHOLD */\n@@ -198,9 +224,11 @@ struct eth_device_info {\n struct eth_rx_queue_info {\n \tint queue_enabled;\t/* True if added */\n \tint intr_enabled;\n+\tuint8_t ena_vector;\n \tuint16_t wt;\t\t/* Polling weight */\n \tuint32_t flow_id_mask;\t/* Set to ~0 if app provides flow id else 0 */\n \tuint64_t event;\n+\tstruct eth_rx_vector_data vector_data;\n };\n \n static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;\n@@ -722,6 +750,9 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n \t    &rx_adapter->event_enqueue_buffer;\n \tstruct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;\n \n+\tif (!buf->count)\n+\t\treturn 0;\n+\n \tuint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,\n \t\t\t\t\trx_adapter->event_port_id,\n \t\t\t\t\tbuf->events,\n@@ -742,6 +773,77 @@ rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)\n \treturn n;\n }\n \n+static inline void\n+rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\tstruct eth_rx_vector_data *vec)\n+{\n+\tvec->vector_ev->nb_elem = 0;\n+\tvec->vector_ev->port = vec->port;\n+\tvec->vector_ev->queue = vec->queue;\n+\tvec->vector_ev->attr_valid = true;\n+\tTAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);\n+}\n+\n+static inline uint16_t\n+rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,\n+\t\t\tstruct eth_rx_queue_info 
*queue_info,\n+\t\t\tstruct rte_eth_event_enqueue_buffer *buf,\n+\t\t\tstruct rte_mbuf **mbufs, uint16_t num)\n+{\n+\tstruct rte_event *ev = &buf->events[buf->count];\n+\tstruct eth_rx_vector_data *vec;\n+\tuint16_t filled, space, sz;\n+\n+\tfilled = 0;\n+\tvec = &queue_info->vector_data;\n+\n+\tif (vec->vector_ev == NULL) {\n+\t\tif (rte_mempool_get(vec->vector_pool,\n+\t\t\t\t    (void **)&vec->vector_ev) < 0) {\n+\t\t\trte_pktmbuf_free_bulk(mbufs, num);\n+\t\t\treturn 0;\n+\t\t}\n+\t\trxa_init_vector(rx_adapter, vec);\n+\t}\n+\twhile (num) {\n+\t\tif (vec->vector_ev->nb_elem == vec->max_vector_count) {\n+\t\t\t/* Event ready. */\n+\t\t\tev->event = vec->event;\n+\t\t\tev->vec = vec->vector_ev;\n+\t\t\tev++;\n+\t\t\tfilled++;\n+\t\t\tvec->vector_ev = NULL;\n+\t\t\tTAILQ_REMOVE(&rx_adapter->vector_list, vec, next);\n+\t\t\tif (rte_mempool_get(vec->vector_pool,\n+\t\t\t\t\t    (void **)&vec->vector_ev) < 0) {\n+\t\t\t\trte_pktmbuf_free_bulk(mbufs, num);\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\t\t\trxa_init_vector(rx_adapter, vec);\n+\t\t}\n+\n+\t\tspace = vec->max_vector_count - vec->vector_ev->nb_elem;\n+\t\tsz = num > space ? 
space : num;\n+\t\tmemcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,\n+\t\t       sizeof(void *) * sz);\n+\t\tvec->vector_ev->nb_elem += sz;\n+\t\tnum -= sz;\n+\t\tmbufs += sz;\n+\t\tvec->ts = rte_rdtsc();\n+\t}\n+\n+\tif (vec->vector_ev->nb_elem == vec->max_vector_count) {\n+\t\tev->event = vec->event;\n+\t\tev->vec = vec->vector_ev;\n+\t\tev++;\n+\t\tfilled++;\n+\t\tvec->vector_ev = NULL;\n+\t\tTAILQ_REMOVE(&rx_adapter->vector_list, vec, next);\n+\t}\n+\n+\treturn filled;\n+}\n+\n static inline void\n rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\tuint16_t eth_dev_id,\n@@ -766,29 +868,33 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,\n \tuint16_t nb_cb;\n \tuint16_t dropped;\n \n-\t/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */\n-\trss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);\n-\tdo_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;\n-\n-\tfor (i = 0; i < num; i++) {\n-\t\tm = mbufs[i];\n-\n-\t\trss = do_rss ?\n-\t\t\trxa_do_softrss(m, rx_adapter->rss_key_be) :\n-\t\t\tm->hash.rss;\n-\t\tev->event = event;\n-\t\tev->flow_id = (rss & ~flow_id_mask) |\n-\t\t\t\t(ev->flow_id & flow_id_mask);\n-\t\tev->mbuf = m;\n-\t\tev++;\n+\tif (!eth_rx_queue_info->ena_vector) {\n+\t\t/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */\n+\t\trss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);\n+\t\tdo_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;\n+\t\tfor (i = 0; i < num; i++) {\n+\t\t\tm = mbufs[i];\n+\n+\t\t\trss = do_rss ? 
rxa_do_softrss(m, rx_adapter->rss_key_be)\n+\t\t\t\t     : m->hash.rss;\n+\t\t\tev->event = event;\n+\t\t\tev->flow_id = (rss & ~flow_id_mask) |\n+\t\t\t\t      (ev->flow_id & flow_id_mask);\n+\t\t\tev->mbuf = m;\n+\t\t\tev++;\n+\t\t}\n+\t} else {\n+\t\tnum = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,\n+\t\t\t\t\t      buf, mbufs, num);\n \t}\n \n-\tif (dev_info->cb_fn) {\n+\tif (num && dev_info->cb_fn) {\n \n \t\tdropped = 0;\n \t\tnb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,\n-\t\t\t\t\tETH_EVENT_BUFFER_SIZE, buf->count, ev,\n-\t\t\t\t\tnum, dev_info->cb_arg, &dropped);\n+\t\t\t\t\tETH_EVENT_BUFFER_SIZE, buf->count,\n+\t\t\t\t\t&buf->events[buf->count], num,\n+\t\t\t\t\tdev_info->cb_arg, &dropped);\n \t\tif (unlikely(nb_cb > num))\n \t\t\tRTE_EDEV_LOG_ERR(\"Rx CB returned %d (> %d) events\",\n \t\t\t\tnb_cb, num);\n@@ -1124,6 +1230,30 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)\n \treturn nb_rx;\n }\n \n+static void\n+rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)\n+{\n+\tstruct rte_event_eth_rx_adapter *rx_adapter = arg;\n+\tstruct rte_eth_event_enqueue_buffer *buf =\n+\t\t&rx_adapter->event_enqueue_buffer;\n+\tstruct rte_event *ev;\n+\n+\tif (buf->count)\n+\t\trxa_flush_event_buffer(rx_adapter);\n+\n+\tif (vec->vector_ev->nb_elem == 0)\n+\t\treturn;\n+\tev = &buf->events[buf->count];\n+\n+\t/* Event ready. 
*/\n+\tev->event = vec->event;\n+\tev->vec = vec->vector_ev;\n+\tbuf->count++;\n+\n+\tvec->vector_ev = NULL;\n+\tvec->ts = 0;\n+}\n+\n static int\n rxa_service_func(void *args)\n {\n@@ -1137,6 +1267,24 @@ rxa_service_func(void *args)\n \t\treturn 0;\n \t}\n \n+\tif (rx_adapter->ena_vector) {\n+\t\tif ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=\n+\t\t    rx_adapter->vector_tmo_ticks) {\n+\t\t\tstruct eth_rx_vector_data *vec;\n+\n+\t\t\tTAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {\n+\t\t\t\tuint64_t elapsed_time = rte_rdtsc() - vec->ts;\n+\n+\t\t\t\tif (elapsed_time >= vec->vector_timeout_ticks) {\n+\t\t\t\t\trxa_vector_expire(vec, rx_adapter);\n+\t\t\t\t\tTAILQ_REMOVE(&rx_adapter->vector_list,\n+\t\t\t\t\t\t     vec, next);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\trx_adapter->prev_expiry_ts = rte_rdtsc();\n+\t\t}\n+\t}\n+\n \tstats = &rx_adapter->stats;\n \tstats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);\n \tstats->rx_packets += rxa_poll(rx_adapter);\n@@ -1640,6 +1788,29 @@ rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t}\n }\n \n+static void\n+rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,\n+\t\t    uint64_t vector_ns, struct rte_mempool *mp, int32_t qid,\n+\t\t    uint16_t port_id)\n+{\n+#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)\n+\tstruct eth_rx_vector_data *vector_data;\n+\tuint32_t flow_id;\n+\n+\tvector_data = &queue_info->vector_data;\n+\tvector_data->max_vector_count = vector_count;\n+\tvector_data->port = port_id;\n+\tvector_data->queue = qid;\n+\tvector_data->vector_pool = mp;\n+\tvector_data->vector_timeout_ticks =\n+\t\tNSEC2TICK(vector_ns, rte_get_timer_hz());\n+\tvector_data->ts = 0;\n+\tflow_id = queue_info->event & 0xFFFFF;\n+\tflow_id =\n+\t\tflow_id == 0 ? 
(qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;\n+\tvector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;\n+}\n+\n static void\n rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,\n \tstruct eth_device_info *dev_info,\n@@ -1741,6 +1912,42 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,\n \t}\n }\n \n+static void\n+rxa_sw_event_vector_configure(\n+\tstruct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,\n+\tint rx_queue_id,\n+\tconst struct rte_event_eth_rx_adapter_event_vector_config *config)\n+{\n+\tstruct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];\n+\tstruct eth_rx_queue_info *queue_info;\n+\tstruct rte_event *qi_ev;\n+\n+\tif (rx_queue_id == -1) {\n+\t\tuint16_t nb_rx_queues;\n+\t\tuint16_t i;\n+\n+\t\tnb_rx_queues = dev_info->dev->data->nb_rx_queues;\n+\t\tfor (i = 0; i < nb_rx_queues; i++)\n+\t\t\trxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,\n+\t\t\t\t\t\t      config);\n+\t\treturn;\n+\t}\n+\n+\tqueue_info = &dev_info->rx_queue[rx_queue_id];\n+\tqi_ev = (struct rte_event *)&queue_info->event;\n+\tqueue_info->ena_vector = 1;\n+\tqi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;\n+\trxa_set_vector_data(queue_info, config->vector_sz,\n+\t\t\t    config->vector_timeout_ns, config->vector_mp,\n+\t\t\t    rx_queue_id, dev_info->dev->data->port_id);\n+\trx_adapter->ena_vector = 1;\n+\trx_adapter->vector_tmo_ticks =\n+\t\trx_adapter->vector_tmo_ticks ?\n+\t\t\t      RTE_MIN(config->vector_timeout_ns >> 1,\n+\t\t\t\trx_adapter->vector_tmo_ticks) :\n+\t\t\t      config->vector_timeout_ns >> 1;\n+}\n+\n static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,\n \t\tuint16_t eth_dev_id,\n \t\tint rx_queue_id,\n@@ -1967,6 +2174,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,\n \trx_adapter->conf_cb = conf_cb;\n \trx_adapter->conf_arg = conf_arg;\n \trx_adapter->id = id;\n+\tTAILQ_INIT(&rx_adapter->vector_list);\n \tstrcpy(rx_adapter->mem_name, mem_name);\n 
\trx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,\n \t\t\t\t\tRTE_MAX_ETHPORTS *\n@@ -2081,6 +2289,15 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,\n \t\treturn -EINVAL;\n \t}\n \n+\tif ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&\n+\t    (queue_conf->rx_queue_flags &\n+\t     RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {\n+\t\tRTE_EDEV_LOG_ERR(\"Event vectorization is not supported,\"\n+\t\t\t\t \" eth port: %\" PRIu16 \" adapter id: %\" PRIu8,\n+\t\t\t\t eth_dev_id, id);\n+\t\treturn -EINVAL;\n+\t}\n+\n \tif ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&\n \t\t(rx_queue_id != -1)) {\n \t\tRTE_EDEV_LOG_ERR(\"Rx queues can only be connected to single \"\n@@ -2143,6 +2360,17 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,\n \treturn 0;\n }\n \n+static int\n+rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)\n+{\n+\tlimits->max_sz = MAX_VECTOR_SIZE;\n+\tlimits->min_sz = MIN_VECTOR_SIZE;\n+\tlimits->max_timeout_ns = MAX_VECTOR_NS;\n+\tlimits->min_timeout_ns = MIN_VECTOR_NS;\n+\n+\treturn 0;\n+}\n+\n int\n rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,\n \t\t\t\tint32_t rx_queue_id)\n@@ -2333,7 +2561,8 @@ rte_event_eth_rx_adapter_queue_event_vector_config(\n \t\tret = dev->dev_ops->eth_rx_adapter_event_vector_config(\n \t\t\tdev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);\n \t} else {\n-\t\tret = -ENOTSUP;\n+\t\trxa_sw_event_vector_configure(rx_adapter, eth_dev_id,\n+\t\t\t\t\t      rx_queue_id, config);\n \t}\n \n \treturn ret;\n@@ -2371,7 +2600,7 @@ rte_event_eth_rx_adapter_vector_limits_get(\n \t\tret = dev->dev_ops->eth_rx_adapter_vector_limits_get(\n \t\t\tdev, &rte_eth_devices[eth_port_id], limits);\n \t} else {\n-\t\tret = -ENOTSUP;\n+\t\tret = rxa_sw_vector_limits(limits);\n \t}\n \n \treturn ret;\ndiff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c\nindex be0499c52..62824654b 100644\n--- 
a/lib/librte_eventdev/rte_eventdev.c\n+++ b/lib/librte_eventdev/rte_eventdev.c\n@@ -122,7 +122,11 @@ rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,\n \n \tif (caps == NULL)\n \t\treturn -EINVAL;\n-\t*caps = 0;\n+\n+\tif (dev->dev_ops->eth_rx_adapter_caps_get == NULL)\n+\t\t*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;\n+\telse\n+\t\t*caps = 0;\n \n \treturn dev->dev_ops->eth_rx_adapter_caps_get ?\n \t\t\t\t(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,\n",
    "prefixes": [
        "v8",
        "4/8"
    ]
}