get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/40920/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40920,
    "url": "http://patchwork.dpdk.org/api/patches/40920/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com",
    "date": "2018-06-08T17:24:19",
    "name": "[dpdk-dev,20/20] examples/l2fwd: add eventmode for l2fwd",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "318dd7dffd9b5891b45bc7a72965c3b3e76485b2",
    "submitter": {
        "id": 893,
        "url": "http://patchwork.dpdk.org/api/people/893/?format=api",
        "name": "Anoob Joseph",
        "email": "anoob.joseph@caviumnetworks.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patchwork.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com/mbox/",
    "series": [
        {
            "id": 61,
            "url": "http://patchwork.dpdk.org/api/series/61/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=61",
            "date": "2018-06-08T17:23:59",
            "name": "add eventmode helper functions",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/61/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/40920/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/40920/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 151BA1D0AE;\n\tFri,  8 Jun 2018 19:26:14 +0200 (CEST)",
            "from NAM03-BY2-obe.outbound.protection.outlook.com\n\t(mail-by2nam03on0070.outbound.protection.outlook.com [104.47.42.70])\n\tby dpdk.org (Postfix) with ESMTP id 302F81D06E\n\tfor <dev@dpdk.org>; Fri,  8 Jun 2018 19:26:12 +0200 (CEST)",
            "from ajoseph83.caveonetworks.com.caveonetworks.com (115.113.156.2)\n\tby DM6PR07MB4906.namprd07.prod.outlook.com (2603:10b6:5:a3::11)\n\twith Microsoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.841.17;\n\tFri, 8 Jun 2018 17:26:07 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=3pcx5I535udSgZSGUtMlcudL8Dt+Q9RlNRpZwGzoEKI=;\n\tb=bM5KLxGIqHUhTGy9F0C3Z1v9nfl9wJzREcL4Y8tVuwR+XZhAMIjbRoB8ntPtpklX0lYqNmf2SesCAPhLH/ZD3D75i0t3wOqV8FmzE9qYoXtTboNLv3FyxFNmZHrBUipWelVlS+bHIRTE58fftbp+NYajBB142Z5tnEHh66G8uMc=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Anoob.Joseph@cavium.com; ",
        "From": "Anoob Joseph <anoob.joseph@caviumnetworks.com>",
        "To": "Bruce Richardson <bruce.richardson@intel.com>,\n\tJerin Jacob <jerin.jacob@caviumnetworks.com>,\n\tPablo de Lara <pablo.de.lara.guarch@intel.com>",
        "Cc": "Anoob Joseph <anoob.joseph@caviumnetworks.com>,\n\tHemant Agrawal <hemant.agrawal@nxp.com>,\n\tNarayana Prasad <narayanaprasad.athreya@caviumnetworks.com>,\n\tNikhil Rao <nikhil.rao@intel.com>,\n\tPavan Nikhilesh <pbhagavatula@caviumnetworks.com>,\n\tSunil Kumar Kori <sunil.kori@nxp.com>, dev@dpdk.org",
        "Date": "Fri,  8 Jun 2018 22:54:19 +0530",
        "Message-Id": "<1528478659-15859-21-git-send-email-anoob.joseph@caviumnetworks.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1528478659-15859-1-git-send-email-anoob.joseph@caviumnetworks.com>",
        "References": "<1528478659-15859-1-git-send-email-anoob.joseph@caviumnetworks.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[115.113.156.2]",
        "X-ClientProxiedBy": "BM1PR0101CA0050.INDPRD01.PROD.OUTLOOK.COM\n\t(2603:1096:b00:19::12) To DM6PR07MB4906.namprd07.prod.outlook.com\n\t(2603:10b6:5:a3::11)",
        "X-MS-PublicTrafficType": "Email",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(7020095)(4652020)(5600026)(4534165)(7168020)(4627221)(201703031133081)(201702281549075)(2017052603328)(7153060)(7193020);\n\tSRVR:DM6PR07MB4906; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; DM6PR07MB4906;\n\t3:qAOGS7xLaKFuFNIJ0xSXNFjk8GFjXuavXRirI+eqbzVlb6f3JPAc9VyLkljpDXgwuIPwKoU4YW9oO9ktICrclFhoNyk0E0/N5rzXyQ5IjvE0YqzIwoyUTUL9L9pUgl2CHWea1hv5+uXq8KYWsnpKeLDxW0KwMjHcker6vyTfQ0tyLhsIWYH+GlIMpdzYPJEO92ON22bcDVgfp6HuRVbnV9y1ezFmXfDYQ9BnBXrT7rf0mmiSoLnyWfEJhEM9kETy;\n\t25:L+BOv6sztv/vw2mSvDvsNvgG0/AXBhG72TYBM/JQ2OaNasRnpESj0v560GsQNgVfvT1qOQ2kOK/jF/0dPwxEPIcVK1FljsoBdsmCYFaymTbAeDgTmfPbhOpc+j/4KC2bWauOgp0to3ZXjKkET2kpExWj0r2zrh1IVQClZQwwklrLW46aq70peBAsmoy4XYSS5x+2uyHjEI4XlwclLF2AjFHpLtWvELAUwIVftDG1/gyJlAtpxCJEZqugwWISn2gYizW7W63LsMRRqvi4vEPvqbTDPyHUe/dTHTYUfxEIOUGpvWJ4TktqazDMh/aL7um6erHtZ/iIBHqVe5BnaLnN8g==;\n\t31:gbxSXzkDjiiz6XBMcx9Rfy4Q1DoBBOl6woXXL1h9FmTdOm85tMEkU0AMopFyE+99QjSLui8phKcjPhsD2/6W5ueAOri1TGMO38kya/gd0IdKvSSG7Pp9f5gcXglBCnspvUKxy5g5xt41dQj6D1dSM43IzY64h5TgL2zko1Rh5lL2Zf0TshSelPN9MMSnx2EJcSmSJFsIbUZILzdY4WY8vFih0VdWjsc/AD9tgqQmwpI=",
            "1; DM6PR07MB4906;\n\t20:s1+AePawPiukkP/eT8vX5Yo3bQE7rVi+ksxRb3w8DmRCmMeRuIo1XWmyTgajUugDF/1wh+hGL+4aarMlumVTnCw18i+3ljXjZs09syhZ6NHhGcDTQjzya63VWnbRehTSPYoNnovpvZThCQYjmrRs1L4H/SM24IfVZSvvH2uup1xV5tYAE7woqPtxuyXMTJNV+jexdoHZ1lMVp9WFANAYemEVhgK7A22QQf0e9Nns+08d13QDQ2OsP1W1ObWt8H0Ol5oLWO9jlm4gNg2/AO8ONLlp2VeG0YkwSh7htMjd3vYOrlksMwOl91R1Dld50AEtQUj4DV0fAsooZW5ihACeRk5MnqVGzDA85S+y6pPWo5VGnooeEn9S9w/YWdliKeNADVTz3Uj4JTQuBlU+wzOBRZ3fFrif5kUnOcVe9kQQeI0+do7ItHiVayLG8y0xOur+oAX87oIaAuZF4Ye4Jqns8dMyeEabVts16gkT6inRo9iGeR64a8EDNKNAp9n8pUiZyFAUxDMv6K8YxevtN7VsGAwlHGaUgUkt8tJTFLeMncqrw4fl/BfA+7t9LJe7cgxUVP1VGfuUylbqnMU8FK5h+dYXaBlRD1+cthUhDu3ypf0=;\n\t4:5tA7pRFG2067EuRkfcn+jmwOwx5UEztcW9knSEnwKzIOHoTL+56ZXSWYj72MQQVQO4/9f02O93WWd0JHyZrC63eVFxXFHSPj4knL4khOelbwIgpcWz4yQdAN0Fsqe/PC9BY03wbJPp84Z73NbdJUZRPsZ03zwaGVDTVCJfyIH3P0iHyXL2FY5jJlPaW99UIyfhYtOQicb4DcBnUdzIZk2KFzRSbrz3YzxHcZkIt1b7rDYbvq+Yrg1tqTwVONCKIdiG79ji0RYguivMCl/hlDGA==",
            "=?us-ascii?Q?1; DM6PR07MB4906;\n\t23:xpCSgfrE0tIoG1D0lRkTduwXZfSroqdEkZF7BiY0m?=\n\tALZXxezQ7HeBmmMe310e5tKS8Ri++aEXsK1PwwDFzzN5m+4DIloGU3sKCzHhbWzSHf7U+Q0qjWHIZCavmt3lzq/dFfLZ934LkyUwgOOg3+Ozo/YoTveu5YQH2s7V7MFWIu7nPf+8BCQ8jSM8nnfb6o7G+IyOftyJawotnDhgmRuOPGorbHdXdvO0G6Dy9RY/o0At6Zqh69V8BVV2jd7HJ1RyZVlfk3bvhFfXhk3XCLIMLrMk1Z10hpz8MjjVMEgLdFpdzv21vS18x8qpBG5uVJN7ZyDCplDfjs2BNxeD8/3xJKJpiylEiYsuwWiP96HleuXhxLwyQhJ6Y5BFcMSZCW0heZMs7DgHQ/2sC6Bo2GqV0ULXbN4Olc/qnuq4gsBzC4I4zjbS0PTViH6/VXGNQngPQ3BLEtGI6RF1PqFC6tR8VOh/jz54eT1OOAbKuOiylIN9bnai9uN2zOHrUb7o4QO957K1QCebTe+O7at4K5VBHDjj3N4AVHc0iv3x17hp/hFz3BXzgUEvbvfiELIjl7bzSKSRmo9d3vhM0EyswUdRI2ISWW1zV+zAC0JJ9jGHms5377cn5SBLFWnqlfIeaKxhyLkYNq0/odMz75hCAP3AWN1TBawi8eSQIfya8pc96jug+LGvmuy39mtJGsrUiuNWnSnfEKJHjpszzlCPvX3Ql0X2j+P4CoXKY9DxXRhcTNu02YMqix+Kz9lNbbgxM8Wo8IOcwB+m9cRlMq8sgmK69Vk2Hea5rqp+OTHEDPmXzVzftJ+r7Az3zUqPySPI/gYzAgC/ckFcf8bvRkE6Zz9EmfH1iizRSXRw331okDffURNdOlmUw/rIv3TH3lZ9K63xnbHNor+PKqkiQfgH1F2I7w8x5i2rQ2qqsAkgnHMyjjgj8VqS14cvD8MEBhMgPZeqVL0YwwJ2umLHWtYO15vmVu3Arg9mmef2WgIuIj7n9MDkcr5HPkcd1agIFXjvyPiLPPR2FrVmM8OnMhxwP/F0Z8GRuFUM8EDdaL4/APzVbHuZCrJ3yf274IF5J2YmUaXyaM0bZkpu7W/mf0CA9JM1++F0SCHSGwIiOFkbRqBKFwfvo/1YFU6GEcrKyTLgZbFZdRuZ4iE53u+WUXingTEPKhSe6oZzKzlHlAMhU8SQ0SQvaEC8keqALbgsy+w8EdvRUfoy+tY+dB38nZtb1v4Zb2LQ7U/PjabGywz0jms493uu3563WYm+FWz6RWHU3eeh7BcKc4nT6qT+76q748B0BVHeZsRqM1ujVUNkAd+0iABXrejx8mwihDYcqYVuBHA",
            "1; DM6PR07MB4906;\n\t6:r3MDoyQLzkzZBQcvsrAll9vZVvgPDegY/b4Frbqa+M0Fqu6/OQ3asF0LWeo5WIN82jIrIyE3c8VJwn2tAm7A7eFw1ohblKBfFIrnXN3FvKvaNSCEounosLDH4Hm7JiB2HoqL06e7zaZiHCXpLvEUazYQULZq34dxKIXeCs5gaLVB2MYRlajSP0DWjLv6Mv4lranlVo5vNgXtssjLJSPTdUOeWCnplhu2gGjXZ82AVlXSNizF32u8PHmuHFs6HvhfTipfmlW6VQLoFEZEjS9VLnurQhUuefitk/i6yEIkN8lo3BQKgIY8olMqe90okdi0ODlWK8q6B2IZSzs5PcXhCpbp66QYKBTK1fzwo9T6IpAqH0sHHxbkWZZvdtcoNOJVLHAeBb/dBVXQly6i66wpobXGlt/e62KuYCP5LcOCelhh2/NwiYxsgRLcafVNxG4pIfgfy5gfDGOQnoMrmQAEXg==;\n\t5:R1I8+l+A31mX61S9+n3lLvy24sFPyffwKiAz9qbAde59EYfBtaYEkHE+aiBG+mi2KkD+afwJyxNv1efHdznIcMLnHCtMeyFnjCqv+k5TuoKV6gdtaNvtY2Mc1/Scny7UXuERwrWsz15QggEXFAidwSMB+CYed/Kk9Dc/KNuH5H8=;\n\t24:Tc6XOkpa+mJfTkDao3TenV73mYIBJDz9udS8H1QnXRZB5+ScgpYhPbouINKqHaPayDxxR7XeLHgC5/01jhvZvLjHhlkAyuP6yZY0InH3ORE=",
            "1; DM6PR07MB4906;\n\t7:wQ1GEVL5iE8Ylpx5Jl0BzD72CIvgVYJBlbz/1WJXIYCHftYy0b/EMW0iJv0cvd4o36/B2xbKJFNu7GLLtxGMBwnNbUo6Qk4cNqrsbH9YEyUkCOAS87+a8LsL1PhG+Ym3X+LD/b1foGAa/VoRSeNtDg8blfqjzGONbp0SFhmsCDWZteRrzMsHbEpjUa3/af7k8ksgTEmYhHgUFhgdd0ZjxHlidDo3lXNEEkb1dCW8d8tcIyTLs/ktT0pHVOYtMORf"
        ],
        "X-MS-TrafficTypeDiagnostic": "DM6PR07MB4906:",
        "X-Microsoft-Antispam-PRVS": "<DM6PR07MB4906FC2406E3154B8FC9B389F87B0@DM6PR07MB4906.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(8211001083)(6040522)(2401047)(5005006)(8121501046)(93006095)(3231254)(944501410)(52105095)(10201501046)(3002001)(149027)(150027)(6041310)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123564045)(20161123560045)(20161123562045)(20161123558120)(6072148)(201708071742011)(7699016);\n\tSRVR:DM6PR07MB4906; BCL:0; PCL:0; RULEID:; SRVR:DM6PR07MB4906; ",
        "X-Forefront-PRVS": "06973FFAD3",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(39860400002)(346002)(366004)(376002)(396003)(39380400002)(189003)(199004)(186003)(16526019)(5660300001)(26005)(42882007)(55236004)(66066001)(47776003)(68736007)(6666003)(305945005)(2906002)(8676002)(7736002)(8936002)(36756003)(52116002)(2616005)(956004)(476003)(446003)(50226002)(575784001)(81166006)(11346002)(44832011)(6506007)(386003)(486006)(59450400001)(76176011)(51416003)(48376002)(50466002)(97736004)(316002)(6512007)(25786009)(53936002)(6486002)(81156014)(4326008)(3846002)(6116002)(16586007)(54906003)(110136005)(105586002)(106356001)(72206003)(478600001)(8656006);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:DM6PR07MB4906;\n\tH:ajoseph83.caveonetworks.com.caveonetworks.com; FPR:; SPF:None;\n\tLANG:en; PTR:InfoNoRecords; MX:1; A:1; ",
        "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)",
        "X-Microsoft-Antispam-Message-Info": "hTmdTa+D/EmwbVUY1Xf8y+E2tDwdvPBsHtOvMwyHrWtFRB4cpXIhZZE6cNkcTTwUPFYHyTNln3UwqhfwVO0PKQzzVWGqtmyq0bWLbBtRP+0Zx3hGSBiL3sTqotjLPFbsUTL5okIHek0XCqIbUZ5zrnpc/VmwOe4MKDW8RSl2F0N5Nmcic/pmuI2ExbadSRPj",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-MS-Office365-Filtering-Correlation-Id": "29ce72b9-9114-4e3b-a7ba-08d5cd64ed65",
        "X-OriginatorOrg": "caviumnetworks.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "08 Jun 2018 17:26:07.8110\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "29ce72b9-9114-4e3b-a7ba-08d5cd64ed65",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "711e4ccf-2e9b-4bcf-a551-4094005b6194",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR07MB4906",
        "Subject": "[dpdk-dev] [PATCH 20/20] examples/l2fwd: add eventmode for l2fwd",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adding eventmode support in l2fwd. This uses rte_eventmode_helper APIs\nto setup and use the eventmode capabilties.\n\nSigned-off-by: Anoob Joseph <anoob.joseph@caviumnetworks.com>\n---\n examples/l2fwd/l2fwd_worker.c | 815 +++++++++++++++++++++++++++++++++++++++++-\n examples/l2fwd/main.c         |  64 +++-\n 2 files changed, 864 insertions(+), 15 deletions(-)",
    "diff": "diff --git a/examples/l2fwd/l2fwd_worker.c b/examples/l2fwd/l2fwd_worker.c\nindex 56e0bdb..bc63b31 100644\n--- a/examples/l2fwd/l2fwd_worker.c\n+++ b/examples/l2fwd/l2fwd_worker.c\n@@ -25,6 +25,9 @@\n #include <rte_branch_prediction.h>\n #include <rte_ether.h>\n #include <rte_ethdev.h>\n+#include <rte_eventdev.h>\n+#include <rte_eventmode_helper.h>\n+#include <rte_malloc.h>\n #include <rte_mbuf.h>\n \n #include \"l2fwd_common.h\"\n@@ -138,6 +141,16 @@ l2fwd_periodic_drain_stats_monitor(struct lcore_queue_conf *qconf,\n \t}\n }\n \n+static inline void\n+l2fwd_drain_loop(struct lcore_queue_conf *qconf, struct tsc_tracker *t,\n+\t\tint is_master_core)\n+{\n+\twhile (!force_quit) {\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, t, is_master_core);\n+\t}\n+}\n+\n static void\n l2fwd_mac_updating(struct rte_mbuf *m, unsigned dest_portid)\n {\n@@ -180,9 +193,45 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)\n \tl2fwd_send_pkt(m, dst_port);\n }\n \n-/* main processing loop */\n+static inline void\n+l2fwd_send_single_pkt(struct rte_mbuf *m)\n+{\n+\tl2fwd_send_pkt(m, m->port);\n+}\n+\n+static inline void\n+l2fwd_event_pre_forward(struct rte_event *ev, unsigned portid)\n+{\n+\tunsigned dst_port;\n+\tstruct rte_mbuf *m;\n+\n+\t/* Get the mbuf */\n+\tm = ev->mbuf;\n+\n+\t/* Get the destination port from the tables */\n+\tdst_port = l2fwd_dst_ports[portid];\n+\n+\t/* Save the destination port in the mbuf */\n+\tm->port = dst_port;\n+\n+\t/* Perform work */\n+\tif (mac_updating)\n+\t\tl2fwd_mac_updating(m, dst_port);\n+}\n+\n+static inline void\n+l2fwd_event_switch_to_atomic(struct rte_event *ev, uint8_t atomic_queue_id)\n+{\n+\tev->event_type = RTE_EVENT_TYPE_CPU;\n+\tev->op = RTE_EVENT_OP_FORWARD;\n+\tev->sched_type = RTE_SCHED_TYPE_ATOMIC;\n+\tev->queue_id = atomic_queue_id;\n+}\n+\n+\n+/* poll mode processing loop */\n static 
void\n-l2fwd_main_loop(void)\n+l2fwd_poll_mode_worker(void)\n {\n \tstruct rte_mbuf *pkts_burst[MAX_PKT_BURST];\n \tstruct rte_mbuf *m;\n@@ -241,9 +290,767 @@ l2fwd_main_loop(void)\n \t}\n }\n \n+/*\n+ * Event mode exposes various operating modes depending on the\n+ * capabilities of the event device and the operating mode\n+ * selected.\n+ */\n+\n+/* Workers registered */\n+#define L2FWD_EVENTMODE_WORKERS\t\t4\n+\n+/*\n+ * Event mode worker\n+ * Operating mode : Single stage non-burst with atomic scheduling\n+ */\n+static void\n+l2fwd_eventmode_non_burst_atomic_worker(void *args)\n+{\n+\tstruct rte_event ev;\n+\tstruct rte_mbuf *pkt;\n+\tstruct rte_eventmode_helper_conf *mode_conf;\n+\tstruct rte_eventmode_helper_event_link_info *links = NULL;\n+\tunsigned lcore_nb_link = 0;\n+\tuint32_t lcore_id;\n+\tunsigned i, nb_rx = 0;\n+\tunsigned portid;\n+\tstruct lcore_queue_conf *qconf;\n+\tint is_master_core;\n+\tstruct tsc_tracker tsc = {0};\n+\n+\t/* Get core ID */\n+\tlcore_id = rte_lcore_id();\n+\n+\tRTE_LOG(INFO, L2FWD,\n+\t\t\"Launching event mode single stage non-burst woker with \"\n+\t\t\"atomic scheduling on lcore %d\\n\", lcore_id);\n+\n+\t/* Set the flag if master core */\n+\tis_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;\n+\n+\t/* Get qconf for this core */\n+\tqconf = &lcore_queue_conf[lcore_id];\n+\n+\t/* Set drain tsc */\n+\ttsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /\n+\t\t\tUS_PER_S * BURST_TX_DRAIN_US;\n+\n+\t/* Mode conf will be passed as args */\n+\tmode_conf = (struct rte_eventmode_helper_conf *)args;\n+\n+\t/* Get the links configured for this lcore */\n+\tlcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,\n+\t\t\tmode_conf, &links);\n+\n+\t/* Check if we have links registered for this lcore */\n+\tif (lcore_nb_link == 0) {\n+\t\t/* No links registered. 
The core could do periodic drains */\n+\t\tl2fwd_drain_loop(qconf, &tsc, is_master_core);\n+\t\tgoto clean_and_exit;\n+\t}\n+\n+\t/* We have valid links */\n+\n+\t/* See if it's single link */\n+\tif (lcore_nb_link == 1)\n+\t\tgoto single_link_loop;\n+\telse\n+\t\tgoto multi_link_loop;\n+\n+single_link_loop:\n+\n+\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\", lcore_id,\n+\t\t\tlinks[0].event_portid);\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\t/* Read packet from event queues */\n+\t\tnb_rx = rte_event_dequeue_burst(links[0].eventdev_id,\n+\t\t\t\tlinks[0].event_portid,\n+\t\t\t\t&ev,     /* events */\n+\t\t\t\t1,       /* nb_events */\n+\t\t\t\t0        /* timeout_ticks */);\n+\n+\t\tif (nb_rx == 0)\n+\t\t\tcontinue;\n+\n+\t\tportid = ev.queue_id;\n+\t\tport_statistics[portid].rx++;\n+\t\tpkt = ev.mbuf;\n+\n+\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\t\tl2fwd_simple_forward(pkt, portid);\n+\t}\n+\tgoto clean_and_exit;\n+\n+multi_link_loop:\n+\n+\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\",\n+\t\t\t\tlcore_id, links[i].event_portid);\n+\t}\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\t\t/* Read packet from event queues */\n+\t\t\tnb_rx = rte_event_dequeue_burst(links[i].eventdev_id,\n+\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\t&ev,     /* events */\n+\t\t\t\t\t1,       /* nb_events */\n+\t\t\t\t\t0        /* timeout_ticks */);\n+\n+\t\t\tif (nb_rx == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tportid = ev.queue_id;\n+\t\t\tport_statistics[portid].rx++;\n+\t\t\tpkt = ev.mbuf;\n+\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\t\t\tl2fwd_simple_forward(pkt, 
portid);\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+clean_and_exit:\n+\tif (links != NULL)\n+\t\trte_free(links);\n+}\n+\n+/*\n+ * Event mode worker\n+ * Operating mode : Single stage burst with atomic scheduling\n+ */\n+static void\n+l2fwd_eventmode_burst_atomic_worker(void *args)\n+{\n+\tstruct rte_event ev[MAX_PKT_BURST];\n+\tstruct rte_mbuf *pkt;\n+\tstruct rte_eventmode_helper_conf *mode_conf;\n+\tstruct rte_eventmode_helper_event_link_info *links = NULL;\n+\tunsigned lcore_nb_link = 0;\n+\tuint32_t lcore_id;\n+\tunsigned i, j, nb_rx = 0;\n+\tunsigned portid;\n+\tstruct lcore_queue_conf *qconf;\n+\tint is_master_core;\n+\tstruct rte_event_port_conf event_port_conf;\n+\tuint16_t dequeue_len = 0;\n+\tstruct tsc_tracker tsc = {0};\n+\n+\t/* Get core ID */\n+\tlcore_id = rte_lcore_id();\n+\n+\tRTE_LOG(INFO, L2FWD,\n+\t\t\"Launching event mode single stage burst woker with \"\n+\t\t\"atomic scheduling on lcore %d\\n\", lcore_id);\n+\n+\t/* Set the flag if master core */\n+\tis_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;\n+\n+\t/* Get qconf for this core */\n+\tqconf = &lcore_queue_conf[lcore_id];\n+\n+\t/* Set drain tsc */\n+\ttsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /\n+\t\t\tUS_PER_S * BURST_TX_DRAIN_US;\n+\n+\t/* Mode conf will be passed as args */\n+\tmode_conf = (struct rte_eventmode_helper_conf *)args;\n+\n+\t/* Get the links configured for this lcore */\n+\tlcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,\n+\t\t\tmode_conf, &links);\n+\n+\t/* Check if we have links registered for this lcore */\n+\tif (lcore_nb_link == 0) {\n+\t\t/* No links registered. 
The core could do periodic drains */\n+\t\tl2fwd_drain_loop(qconf, &tsc, is_master_core);\n+\t\tgoto clean_and_exit;\n+\t}\n+\n+\t/* We have valid links */\n+\n+\t/* Get the burst size of the event device */\n+\n+\t/* Get the default conf of the first link */\n+\trte_event_port_default_conf_get(links[0].eventdev_id,\n+\t\t\tlinks[0].event_portid,\n+\t\t\t&event_port_conf);\n+\n+\t/* Save the burst size */\n+\tdequeue_len = event_port_conf.dequeue_depth;\n+\n+\t/* Dequeue len should not exceed MAX_PKT_BURST */\n+\tif (dequeue_len > MAX_PKT_BURST)\n+\t\tdequeue_len = MAX_PKT_BURST;\n+\n+\t/* See if it's single link */\n+\tif (lcore_nb_link == 1)\n+\t\tgoto single_link_loop;\n+\telse\n+\t\tgoto multi_link_loop;\n+\n+single_link_loop:\n+\n+\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\", lcore_id,\n+\t\t\tlinks[0].event_portid);\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\t/* Read packet from event queues */\n+\t\tnb_rx = rte_event_dequeue_burst(links[0].eventdev_id,\n+\t\t\t\tlinks[0].event_portid,\n+\t\t\t\tev,             /* events */\n+\t\t\t\tdequeue_len,    /* nb_events */\n+\t\t\t\t0               /* timeout_ticks */);\n+\n+\t\tif (nb_rx == 0)\n+\t\t\tcontinue;\n+\n+\t\tfor (j = 0; j < nb_rx; j++) {\n+\t\t\tportid = ev[j].queue_id;\n+\t\t\tport_statistics[portid].rx++;\n+\t\t\tpkt = ev[j].mbuf;\n+\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\t\t\tl2fwd_simple_forward(pkt, portid);\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+multi_link_loop:\n+\n+\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\",\n+\t\t\t\tlcore_id, links[i].event_portid);\n+\t}\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\tfor (i = 0; i < lcore_nb_link; 
i++) {\n+\t\t\t/* Read packet from event queues */\n+\t\t\tnb_rx = rte_event_dequeue_burst(links[i].eventdev_id,\n+\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\tev,             /* events */\n+\t\t\t\t\tdequeue_len,    /* nb_events */\n+\t\t\t\t\t0               /* timeout_ticks */);\n+\n+\t\t\tif (nb_rx == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tfor (j = 0; j < nb_rx; j++) {\n+\t\t\t\tportid = ev[j].queue_id;\n+\t\t\t\tport_statistics[portid].rx++;\n+\t\t\t\tpkt = ev[j].mbuf;\n+\n+\t\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\t\t\t\tl2fwd_simple_forward(pkt, portid);\n+\t\t\t}\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+clean_and_exit:\n+\tif (links != NULL)\n+\t\trte_free(links);\n+}\n+\n+/*\n+ * Event mode worker\n+ * Operating mode : Single stage non-burst with ordered scheduling\n+ */\n+static void\n+l2fwd_eventmode_non_burst_ordered_worker(void *args)\n+{\n+\tstruct rte_event ev;\n+\tstruct rte_mbuf *pkt;\n+\tstruct rte_eventmode_helper_conf *mode_conf;\n+\tstruct rte_eventmode_helper_event_link_info *links = NULL;\n+\tunsigned lcore_nb_link = 0;\n+\tuint32_t lcore_id;\n+\tunsigned i, nb_rx = 0;\n+\tunsigned portid;\n+\tstruct lcore_queue_conf *qconf;\n+\tint is_master_core;\n+\tuint8_t tx_queue;\n+\tuint8_t eventdev_id;\n+\tstruct tsc_tracker tsc = {0};\n+\n+\t/* Get core ID */\n+\tlcore_id = rte_lcore_id();\n+\n+\tRTE_LOG(INFO, L2FWD,\n+\t\t\"Launching event mode single stage non-burst woker with \"\n+\t\t\"ordered scheduling on lcore %d\\n\", lcore_id);\n+\n+\t/* Set the flag if master core */\n+\tis_master_core = (lcore_id == rte_get_master_lcore()) ? 
1 : 0;\n+\n+\t/* Get qconf for this core */\n+\tqconf = &lcore_queue_conf[lcore_id];\n+\n+\t/* Set drain tsc */\n+\ttsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /\n+\t\t\tUS_PER_S * BURST_TX_DRAIN_US;\n+\n+\t/* Mode conf will be passed as args */\n+\tmode_conf = (struct rte_eventmode_helper_conf *)args;\n+\n+\t/* Get the links configured for this lcore */\n+\tlcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,\n+\t\t\tmode_conf, &links);\n+\n+\t/* Check if we have links registered for this lcore */\n+\tif (lcore_nb_link == 0) {\n+\t\t/* No links registered. The core could do periodic drains */\n+\t\tl2fwd_drain_loop(qconf, &tsc, is_master_core);\n+\t\tgoto clean_and_exit;\n+\t}\n+\n+\t/* We have valid links */\n+\n+\t/*\n+\t * When the stage 1 is set to have scheduling ORDERED, the event need\n+\t * to change the scheduling type to ATOMIC before it can be send out.\n+\t * This would ensure that the packets are send out in the same order\n+\t * as it came.\n+\t */\n+\n+\t/*\n+\t * The helper function would create a queue with ATOMIC scheduling\n+\t * for this purpose. 
Worker would submit packets to that queue if the\n+\t * event is not coming from an ATOMIC queue.\n+\t */\n+\n+\t/* Get event dev ID from the first link */\n+\teventdev_id = links[0].eventdev_id;\n+\n+\t/*\n+\t * One queue would be reserved to be used as atomic queue for the last\n+\t * stage (eth packet tx stage)\n+\t */\n+\ttx_queue = rte_eventmode_helper_get_tx_queue(mode_conf, eventdev_id);\n+\n+\t/* See if it's single link */\n+\tif (lcore_nb_link == 1)\n+\t\tgoto single_link_loop;\n+\telse\n+\t\tgoto multi_link_loop;\n+\n+single_link_loop:\n+\n+\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\", lcore_id,\n+\t\t\tlinks[0].event_portid);\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\t/* Read packet from event queues */\n+\t\tnb_rx = rte_event_dequeue_burst(links[0].eventdev_id,\n+\t\t\t\tlinks[0].event_portid,\n+\t\t\t\t&ev,     /* events */\n+\t\t\t\t1,       /* nb_events */\n+\t\t\t\t0        /* timeout_ticks */);\n+\n+\t\tif (nb_rx == 0)\n+\t\t\tcontinue;\n+\n+\t\t/*\n+\t\t * Check if this event came on atomic queue. 
If yes, do eth tx\n+\t\t */\n+\t\tif (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t\tl2fwd_send_single_pkt(ev.mbuf);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* Else, we have a fresh packet */\n+\t\tportid = ev.queue_id;\n+\t\tport_statistics[portid].rx++;\n+\t\tpkt = ev.mbuf;\n+\n+\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\n+\t\t/* Process packet */\n+\t\tl2fwd_event_pre_forward(&ev, portid);\n+\n+\t\t/* Update the scheduling type for tx stage */\n+\t\tl2fwd_event_switch_to_atomic(&ev, tx_queue);\n+\n+\t\t/* Submit the updated event for tx stage */\n+\t\trte_event_enqueue_burst(links[0].eventdev_id,\n+\t\t\t\tlinks[0].event_portid,\n+\t\t\t\t&ev,    /* events */\n+\t\t\t\t1       /* nb_events */);\n+\t}\n+\tgoto clean_and_exit;\n+\n+multi_link_loop:\n+\n+\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\",\n+\t\t\t\tlcore_id, links[i].event_portid);\n+\t}\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\t\t/* Read packet from event queues */\n+\t\t\tnb_rx = rte_event_dequeue_burst(links[i].eventdev_id,\n+\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\t&ev,     /* events */\n+\t\t\t\t\t1,       /* nb_events */\n+\t\t\t\t\t0        /* timeout_ticks */);\n+\n+\t\t\tif (nb_rx == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/*\n+\t\t\t * Check if this event came on atomic queue.\n+\t\t\t * If yes, do eth tx\n+\t\t\t */\n+\t\t\tif (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t\t\tl2fwd_send_single_pkt(ev.mbuf);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\t/* Else, we have a fresh packet */\n+\t\t\tportid = ev.queue_id;\n+\t\t\tport_statistics[portid].rx++;\n+\t\t\tpkt = ev.mbuf;\n+\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\n+\t\t\t/* Process packet */\n+\t\t\tl2fwd_event_pre_forward(&ev, portid);\n+\n+\t\t\t/* Update the scheduling type for 
tx stage */\n+\t\t\tl2fwd_event_switch_to_atomic(&ev, tx_queue);\n+\n+\t\t\t/* Submit the updated event for tx stage */\n+\t\t\trte_event_enqueue_burst(links[i].eventdev_id,\n+\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\t&ev,    /* events */\n+\t\t\t\t\t1       /* nb_events */);\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+clean_and_exit:\n+\tif (links != NULL)\n+\t\trte_free(links);\n+}\n+\n+/*\n+ * Event mode worker\n+ * Operating mode : Single stage burst with ordered scheduling\n+ */\n+static void\n+l2fwd_eventmode_burst_ordered_worker(void *args)\n+{\n+\tstruct rte_event ev[MAX_PKT_BURST];\n+\tstruct rte_mbuf *pkt;\n+\tstruct rte_eventmode_helper_conf *mode_conf;\n+\tstruct rte_eventmode_helper_event_link_info *links = NULL;\n+\tunsigned lcore_nb_link = 0;\n+\tuint32_t lcore_id;\n+\tunsigned i, j, nb_rx = 0;\n+\tunsigned portid;\n+\tstruct lcore_queue_conf *qconf;\n+\tint is_master_core;\n+\tstruct rte_event_port_conf event_port_conf;\n+\tuint16_t dequeue_len = 0;\n+\tuint8_t tx_queue;\n+\tuint8_t eventdev_id;\n+\tstruct tsc_tracker tsc = {0};\n+\n+\t/* Get core ID */\n+\tlcore_id = rte_lcore_id();\n+\n+\tRTE_LOG(INFO, L2FWD,\n+\t\t\"Launching event mode single stage burst woker with \"\n+\t\t\"ordered scheduling on lcore %d\\n\", lcore_id);\n+\n+\t/* Set the flag if master core */\n+\tis_master_core = (lcore_id == rte_get_master_lcore()) ? 1 : 0;\n+\n+\t/* Get qconf for this core */\n+\tqconf = &lcore_queue_conf[lcore_id];\n+\n+\t/* Set drain tsc */\n+\ttsc.drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /\n+\t\t\tUS_PER_S * BURST_TX_DRAIN_US;\n+\n+\t/* Mode conf will be passed as args */\n+\tmode_conf = (struct rte_eventmode_helper_conf *)args;\n+\n+\t/* Get the links configured for this lcore */\n+\tlcore_nb_link = rte_eventmode_helper_get_event_lcore_links(lcore_id,\n+\t\t\tmode_conf, &links);\n+\n+\t/* Check if we have links registered for this lcore */\n+\tif (lcore_nb_link == 0) {\n+\t\t/* No links registered. 
The core could do periodic drains */\n+\t\tl2fwd_drain_loop(qconf, &tsc, is_master_core);\n+\t\tgoto clean_and_exit;\n+\t}\n+\n+\t/* We have valid links */\n+\n+\t/*\n+\t * When the stage 1 is set to have scheduling ORDERED, the event need\n+\t * to change the scheduling type to ATOMIC before it can be send out.\n+\t * This would ensure that the packets are send out in the same order\n+\t * as it came.\n+\t */\n+\n+\t/*\n+\t * The helper function would create a queue with ATOMIC scheduling\n+\t * for this purpose. Worker would submit packets to that queue if the\n+\t * event is not coming from an ATOMIC queue.\n+\t */\n+\n+\t/* Get event dev ID from the first link */\n+\teventdev_id = links[0].eventdev_id;\n+\n+\t/*\n+\t * One queue would be reserved to be used as atomic queue for the last\n+\t * stage (eth packet tx stage)\n+\t */\n+\ttx_queue = rte_eventmode_helper_get_tx_queue(mode_conf, eventdev_id);\n+\n+\t/* Get the burst size of the event device */\n+\n+\t/* Get the default conf of the first link */\n+\trte_event_port_default_conf_get(links[0].eventdev_id,\n+\t\t\tlinks[0].event_portid,\n+\t\t\t&event_port_conf);\n+\n+\t/* Save the burst size */\n+\tdequeue_len = event_port_conf.dequeue_depth;\n+\n+\t/* Dequeue len should not exceed MAX_PKT_BURST */\n+\tif (dequeue_len > MAX_PKT_BURST)\n+\t\tdequeue_len = MAX_PKT_BURST;\n+\n+\t/* See if it's single link */\n+\tif (lcore_nb_link == 1)\n+\t\tgoto single_link_loop;\n+\telse\n+\t\tgoto multi_link_loop;\n+\n+single_link_loop:\n+\n+\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\", lcore_id,\n+\t\t\tlinks[0].event_portid);\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\t/* Read packet from event queues */\n+\t\tnb_rx = rte_event_dequeue_burst(links[0].eventdev_id,\n+\t\t\t\tlinks[0].event_portid,\n+\t\t\t\tev,             /* events */\n+\t\t\t\tdequeue_len,    /* nb_events 
*/\n+\t\t\t\t0               /* timeout_ticks */);\n+\n+\t\tif (nb_rx == 0)\n+\t\t\tcontinue;\n+\n+\t\tfor (j = 0; j < nb_rx; j++) {\n+\t\t\t/*\n+\t\t\t * Check if this event came on atomic queue.\n+\t\t\t * If yes, do eth tx\n+\t\t\t */\n+\t\t\tif (ev[j].sched_type == RTE_SCHED_TYPE_ATOMIC) {\n+\t\t\t\tl2fwd_send_single_pkt(ev[j].mbuf);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\n+\t\t\t/* Else, we have a fresh packet */\n+\t\t\tportid = ev[j].queue_id;\n+\t\t\tport_statistics[portid].rx++;\n+\t\t\tpkt = ev[j].mbuf;\n+\n+\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\n+\t\t\t/* Process packet */\n+\t\t\tl2fwd_event_pre_forward(&(ev[j]), portid);\n+\n+\t\t\t/* Update the scheduling type for tx stage */\n+\t\t\tl2fwd_event_switch_to_atomic(&(ev[j]), tx_queue);\n+\n+\t\t\t/* Submit the updated event for tx stage */\n+\t\t\trte_event_enqueue_burst(links[0].eventdev_id,\n+\t\t\t\t\tlinks[0].event_portid,\n+\t\t\t\t\t&(ev[j]),       /* events */\n+\t\t\t\t\t1               /* nb_events */);\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+multi_link_loop:\n+\n+\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\tRTE_LOG(INFO, L2FWD, \" -- lcoreid=%u event_port_id=%u\\n\",\n+\t\t\t\tlcore_id, links[i].event_portid);\n+\t}\n+\n+\twhile (!force_quit) {\n+\n+\t\t/* Do periodic operations (buffer drain & stats monitor) */\n+\t\tl2fwd_periodic_drain_stats_monitor(qconf, &tsc, is_master_core);\n+\n+\t\tfor (i = 0; i < lcore_nb_link; i++) {\n+\t\t\t/* Read packet from event queues */\n+\t\t\tnb_rx = rte_event_dequeue_burst(links[i].eventdev_id,\n+\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\tev,             /* events */\n+\t\t\t\t\tdequeue_len,    /* nb_events */\n+\t\t\t\t\t0               /* timeout_ticks */);\n+\n+\t\t\tif (nb_rx == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tfor (j = 0; j < nb_rx; j++) {\n+\t\t\t\t/*\n+\t\t\t\t * Check if this event came on atomic queue.\n+\t\t\t\t * If yes, do eth tx\n+\t\t\t\t */\n+\t\t\t\tif (ev[j].sched_type == RTE_SCHED_TYPE_ATOMIC) 
{\n+\t\t\t\t\tl2fwd_send_single_pkt(ev[j].mbuf);\n+\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\n+\t\t\t\t/* Else, we have a fresh packet */\n+\t\t\t\tportid = ev[j].queue_id;\n+\t\t\t\tport_statistics[portid].rx++;\n+\t\t\t\tpkt = ev[j].mbuf;\n+\n+\t\t\t\trte_prefetch0(rte_pktmbuf_mtod(pkt, void *));\n+\n+\t\t\t\t/* Process packet */\n+\t\t\t\tl2fwd_event_pre_forward(&(ev[j]), portid);\n+\n+\t\t\t\t/* Update the scheduling type for tx stage */\n+\t\t\t\tl2fwd_event_switch_to_atomic(&(ev[j]),\n+\t\t\t\t\t\ttx_queue);\n+\n+\t\t\t\t/* Submit the updated event for tx stage */\n+\t\t\t\trte_event_enqueue_burst(links[i].eventdev_id,\n+\t\t\t\t\t\tlinks[i].event_portid,\n+\t\t\t\t\t\t&(ev[j]), /* events */\n+\t\t\t\t\t\t1         /* nb_events */);\n+\t\t\t}\n+\t\t}\n+\t}\n+\tgoto clean_and_exit;\n+\n+clean_and_exit:\n+\tif (links != NULL)\n+\t\trte_free(links);\n+}\n+\n+static uint8_t\n+l2fwd_eventmode_populate_wrkr_params(\n+\t\tstruct rte_eventmode_helper_app_worker_params *wrkrs)\n+{\n+\tuint8_t nb_wrkr_param = 0;\n+\tstruct rte_eventmode_helper_app_worker_params *wrkr;\n+\n+\t/* Save workers */\n+\n+\twrkr = wrkrs;\n+\n+\t/* Single stage non-burst with atomic scheduling */\n+\twrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;\n+\twrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ATOMIC;\n+\twrkr->nb_stage = 1;\n+\twrkr->s1_worker_thread = l2fwd_eventmode_non_burst_atomic_worker;\n+\n+\tnb_wrkr_param++;\n+\twrkr++;\n+\n+\t/* Single stage burst with atomic scheduling */\n+\twrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;\n+\twrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ATOMIC;\n+\twrkr->nb_stage = 1;\n+\twrkr->s1_worker_thread = l2fwd_eventmode_burst_atomic_worker;\n+\n+\tnb_wrkr_param++;\n+\twrkr++;\n+\n+\t/* Single stage non-burst with ordered scheduling */\n+\twrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_NON_BURST;\n+\twrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ORDERED;\n+\twrkr->nb_stage = 1;\n+\twrkr->s1_worker_thread = 
l2fwd_eventmode_non_burst_ordered_worker;\n+\n+\tnb_wrkr_param++;\n+\twrkr++;\n+\n+\t/* Single stage burst with ordered scheduling */\n+\twrkr->cap.burst = RTE_EVENTMODE_HELPER_RX_TYPE_BURST;\n+\twrkr->cap.s1_sched_type = RTE_SCHED_TYPE_ORDERED;\n+\twrkr->nb_stage = 1;\n+\twrkr->s1_worker_thread = l2fwd_eventmode_burst_ordered_worker;\n+\n+\tnb_wrkr_param++;\n+\treturn nb_wrkr_param;\n+}\n+\n+static void\n+l2fwd_eventmode_worker(struct rte_eventmode_helper_conf *mode_conf)\n+{\n+\tstruct rte_eventmode_helper_app_worker_params\n+\t\t\tl2fwd_wrkr[L2FWD_EVENTMODE_WORKERS] = {0};\n+\tuint8_t nb_wrkr_param;\n+\n+\t/* Populate l2fwd_wrkr params */\n+\tnb_wrkr_param = l2fwd_eventmode_populate_wrkr_params(l2fwd_wrkr);\n+\n+\t/*\n+\t * The helper function will launch the correct worker after checking the\n+\t * event device's capabilities.\n+\t */\n+\trte_eventmode_helper_launch_worker(mode_conf, l2fwd_wrkr,\n+\t\t\tnb_wrkr_param);\n+}\n+\n int\n-l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)\n+l2fwd_launch_one_lcore(void *args)\n {\n-\tl2fwd_main_loop();\n+\tstruct rte_eventmode_helper_conf *mode_conf;\n+\n+\tmode_conf = (struct rte_eventmode_helper_conf *)args;\n+\n+\tif (mode_conf->mode == RTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_POLL) {\n+\t\t/* App is initialized to run in poll mode */\n+\t\tl2fwd_poll_mode_worker();\n+\t} else if (mode_conf->mode ==\n+\t\t\tRTE_EVENTMODE_HELPER_PKT_TRANSFER_MODE_EVENT) {\n+\t\t/* App is initialized to run in event mode */\n+\t\tl2fwd_eventmode_worker(mode_conf);\n+\t}\n \treturn 0;\n }\ndiff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c\nindex ac81beb..278b9a8 100644\n--- a/examples/l2fwd/main.c\n+++ b/examples/l2fwd/main.c\n@@ -38,6 +38,7 @@\n #include <rte_ethdev.h>\n #include <rte_mempool.h>\n #include <rte_mbuf.h>\n+#include <rte_eventmode_helper.h>\n \n #include \"l2fwd_common.h\"\n #include \"l2fwd_worker.h\"\n@@ -69,6 +70,8 @@ l2fwd_usage(const char *prgname)\n \t\t\" [-q NQ]\",\n \t\tprgname);\n 
\n+\trte_eventmode_helper_print_options_list();\n+\n \tfprintf(stderr, \"\\n\\n\");\n \n \tfprintf(stderr,\n@@ -79,7 +82,9 @@ l2fwd_usage(const char *prgname)\n \t\t\"      When enabled:\\n\"\n \t\t\"       - The source MAC address is replaced by the TX port MAC address\\n\"\n \t\t\"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\\n\"\n-\t\t\"\\n\");\n+\t\t\"\");\n+\n+\trte_eventmode_helper_print_options_description();\n }\n \n static int\n@@ -158,12 +163,14 @@ static const struct option lgopts[] = {\n \n /* Parse the argument given in the command line of the application */\n static int\n-l2fwd_parse_args(int argc, char **argv)\n+l2fwd_parse_args(int argc, char **argv,\n+\t\tstruct rte_eventmode_helper_conf **mode_conf)\n {\n-\tint opt, ret, timer_secs;\n+\tint opt, timer_secs;\n \tchar **argvopt;\n \tint option_index;\n \tchar *prgname = argv[0];\n+\tint options_parsed = 0;\n \n \targvopt = argv;\n \n@@ -212,12 +219,31 @@ l2fwd_parse_args(int argc, char **argv)\n \t\t}\n \t}\n \n-\tif (optind >= 0)\n-\t\targv[optind-1] = prgname;\n+\t/* Update argc & argv to move to event mode options */\n+\toptions_parsed = optind-1;\n+\targc -= options_parsed;\n+\targv += options_parsed;\n \n-\tret = optind-1;\n-\toptind = 1; /* reset getopt lib */\n-\treturn ret;\n+\t/* Reset getopt lib */\n+\toptind = 1;\n+\n+\t/* Check for event mode parameters and get the conf prepared*/\n+\t*mode_conf = rte_eventmode_helper_parse_args(argc, argv);\n+\tif (*mode_conf == NULL) {\n+\t\tl2fwd_usage(prgname);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Add the number of options parsed */\n+\toptions_parsed += optind-1;\n+\n+\tif (options_parsed >= 0)\n+\t\targv[options_parsed] = prgname;\n+\n+\t/* Reset getopt lib */\n+\toptind = 1;\n+\n+\treturn options_parsed;\n }\n \n /* Check the link status of all ports in up to 9s, and print them finally */\n@@ -315,6 +341,7 @@ main(int argc, char **argv)\n \tunsigned nb_ports_in_mask = 0;\n \tunsigned int nb_lcores = 0;\n \tunsigned int 
nb_mbufs;\n+\tstruct rte_eventmode_helper_conf *mode_conf = NULL;\n \n \t/* Set default values for global vars */\n \tl2fwd_init_global_vars();\n@@ -329,8 +356,12 @@ main(int argc, char **argv)\n \tsignal(SIGINT, signal_handler);\n \tsignal(SIGTERM, signal_handler);\n \n-\t/* parse application arguments (after the EAL ones) */\n-\tret = l2fwd_parse_args(argc, argv);\n+\t/*\n+\t * Parse application arguments (after the EAL ones). This would parse\n+\t * the event mode options too, and would set the conf pointer\n+\t * accordingly.\n+\t */\n+\tret = l2fwd_parse_args(argc, argv, &mode_conf);\n \tif (ret < 0)\n \t\trte_exit(EXIT_FAILURE, \"Invalid L2FWD arguments\\n\");\n \n@@ -521,9 +552,20 @@ main(int argc, char **argv)\n \n \tcheck_all_ports_link_status(l2fwd_enabled_port_mask);\n \n+\t/*\n+\t * Set the enabled port mask in helper conf to be used by helper\n+\t * sub-system. This would be used while intializing devices using\n+\t * helper sub-system.\n+\t */\n+\tmode_conf->eth_portmask = l2fwd_enabled_port_mask;\n+\n+\t/* Initialize eventmode components */\n+\trte_eventmode_helper_initialize_devs(mode_conf);\n+\n \tret = 0;\n \t/* launch per-lcore init on every lcore */\n-\trte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);\n+\trte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)mode_conf,\n+\t\t\tCALL_MASTER);\n \tRTE_LCORE_FOREACH_SLAVE(lcore_id) {\n \t\tif (rte_eal_wait_lcore(lcore_id) < 0) {\n \t\t\tret = -1;\n",
    "prefixes": [
        "dpdk-dev",
        "20/20"
    ]
}