get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces the writable fields).

GET /api/patches/104487/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104487,
    "url": "http://patchwork.dpdk.org/api/patches/104487/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211118015228.30628-2-eagostini@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211118015228.30628-2-eagostini@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211118015228.30628-2-eagostini@nvidia.com",
    "date": "2021-11-18T01:52:28",
    "name": "[v1,1/1] app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b4f0aafd97351bc08ec2054a4d05fb05ed916e0f",
    "submitter": {
        "id": 1571,
        "url": "http://patchwork.dpdk.org/api/people/1571/?format=api",
        "name": "Elena Agostini",
        "email": "eagostini@nvidia.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211118015228.30628-2-eagostini@nvidia.com/mbox/",
    "series": [
        {
            "id": 20630,
            "url": "http://patchwork.dpdk.org/api/series/20630/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20630",
            "date": "2021-11-18T01:52:27",
            "name": "app/test-gpudev: introduce ethdev to rx/tx packets using GPU memory",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/20630/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104487/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/104487/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C61BCA0C41;\n\tWed, 17 Nov 2021 18:42:08 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B68AE4117D;\n\tWed, 17 Nov 2021 18:42:02 +0100 (CET)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2047.outbound.protection.outlook.com [40.107.237.47])\n by mails.dpdk.org (Postfix) with ESMTP id B1AD441158\n for <dev@dpdk.org>; Wed, 17 Nov 2021 18:42:00 +0100 (CET)",
            "from DM6PR11CA0048.namprd11.prod.outlook.com (2603:10b6:5:14c::25)\n by MWHPR12MB1885.namprd12.prod.outlook.com (2603:10b6:300:114::9) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4713.19; Wed, 17 Nov\n 2021 17:41:58 +0000",
            "from DM6NAM11FT017.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:14c:cafe::d9) by DM6PR11CA0048.outlook.office365.com\n (2603:10b6:5:14c::25) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4690.26 via Frontend\n Transport; Wed, 17 Nov 2021 17:41:58 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT017.mail.protection.outlook.com (10.13.172.145) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4713.20 via Frontend Transport; Wed, 17 Nov 2021 17:41:58 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 17 Nov\n 2021 17:41:54 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=K2YnEWqm6rOwuTBzBajA3EHeKnmUY120Ix9/1BkxC5RqKdWX9OTWyvmBCLdn1u9PoYkXMX85VW+Z6zAgj2H2YCGHCeiMMAPlNNV0B9yrs4ItS9I6y/jj8EflV1SvurQnUyIXgZBvnKoeN3AnSVA+lqFJ+OvwM0hkMW7kDurssnwd7wHmFEgGtCydqzetPXBmVkigxCZZl7XyQAsnGYDrTXXpJxfs3DLzGaxZBT9Vtx4KS0fFLAk4v5UdCe56kVhRKZS3YoHRIM67ErWDq+XEs84e7uRDKGqBAq0GEO9hEzhyrDFlJxqINhH+HWgg9YEGss1oW4kw9e1ab8kzhdJ3BA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=w1Bt4kf7fWyfOv4zS1CzesWPk30T6Z+4UDuRiYA58E4=;\n b=MNxl60MWbaBVSkEoTZzYqkocsibr1MU8s+r2b7O3Or5G+hmuwRaZsd03XgHu+c4+QPr8tA2UXD8O/L1OSv7HDobk80NesHIUyI0QFeoMMzTbUdE0uLONIl400LixzVQcvA82Ly7UOmEPqOvLfFG0K+8CghxA2F2erHZR1wykujXtaDL1jwUKElQbbMngXnFnZeS3q3mXD0mKs+g6nuY1o3w9xQZXO47B65ZYW9k4lrBxGFsG/VRA13NYsOd0skjqPPe+5F1JiZE5LeumwpfPHs+FsiY/XZLHi6JFHlYa0jcuJj44p2JihJetpK59zS9rmjSYjxk+sy9YJRVOCd202A==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=w1Bt4kf7fWyfOv4zS1CzesWPk30T6Z+4UDuRiYA58E4=;\n b=Emg1rASiZYVAg5KG3iyVrVd3jCI593hqdj7IlwMUt/ut25G7LEUjBeOHwnj+OL2YmvIduAA9yohsFmZE1TKLLpZytU36b9a6od7o/dkzOfpXEqf1igS8KgoTY3MQhHL0N9QSdkFxWjGS2/wBNK3TG08XblVXcosocbSAYm9beSW+R/fl+pt09+sKtdVUjrdPnIWfiem42Bkn4V77UtK1yHrdSB50PQMAguGGxEJPmbEpmTuoTK1Vke47iI7zoG2hxORsfNEYQiGYdPMP7qeZ7YR4OI79qRwZoZoS8N6jk+cTBio3ogBaBqsOb24S2haPyW6mzoaSI0xT8K9zrSsB9A==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "<eagostini@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Elena Agostini <eagostini@nvidia.com>",
        "Subject": "[PATCH v1 1/1] app/test-gpudev: introduce ethdev to rx/tx packets\n using GPU memory",
        "Date": "Thu, 18 Nov 2021 01:52:28 +0000",
        "Message-ID": "<20211118015228.30628-2-eagostini@nvidia.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20211118015228.30628-1-eagostini@nvidia.com>",
        "References": "<20211118015228.30628-1-eagostini@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "0e70a319-6dee-4e97-c4c9-08d9a9f18e08",
        "X-MS-TrafficTypeDiagnostic": "MWHPR12MB1885:",
        "X-Microsoft-Antispam-PRVS": "\n <MWHPR12MB188575C1DD07AE7F617B1925CD9A9@MWHPR12MB1885.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:125;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n rB6uJ71dOVj3WRMjtuSgcbpXhYfiitZkteHulnrIUu5xbPQuP/BYonmY5Mwx/tP4ePgZrL/s1S/4Gt7RRqMT4oc5EK9UEq2aTFCMHjRtZJrA+L+tO/B/DyVakotg3GgudmHXjVxkydJpF5Z3EgBPXb6wwxbXJ8vz88stoMV+zpjbDf88RoE8FOSEarJAOo5ufn84jMKKPM5lwoogHRVu7YiiYFkb/F3X8Sv0jPFrv98opG3Ia0A/mlhO9m/fAlUN+kSSc91KBRR8m+D7tkwCtVsgch5y4qkZq2kQj0mBAuulssOTEk99e1zOO5oLPmVkc5JAsPZ2kRufBatoNY+ybDszfO9GOh+8M723aDJkumQ26kauMUqF66VqcukcMHxrTUEwpefNd+Th1T59tVaRkIax63eXX1uoJSwR4Wo38XubWBGYi0VVYTnl56aQBwpTJOxHRKyJDSgrwRNsmvlR76w/mMxexViXAl4+bI3rIgyUc2S6rpNUm2VFKQpdU2gMIE/duAqgFRHGCVrgloo9ef+G3QWl2DVYGX6HM1lv0UEE5JgKctyhvtljz3dyEIRnWsF1ncVRNUZTiB+VewQCXEEHg4LJd6zbExEc4eHzW/uxhZE4bbKYEC+81mTnRAB4PUPzWlQ11NdqtEkwLLKUhn1lc1RZMEENC6wrQ5t+psRE/swskxDjqTWclYr3YdE0KBP7MaRDlYUZZ9XmulSTjg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(107886003)(30864003)(36756003)(83380400001)(6286002)(82310400003)(7696005)(8676002)(7636003)(186003)(86362001)(356005)(4326008)(8936002)(16526019)(1076003)(2876002)(36860700001)(55016002)(70206006)(36906005)(426003)(70586007)(6916009)(336012)(2616005)(47076005)(26005)(5660300002)(2906002)(316002)(6666004)(508600001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "17 Nov 2021 17:41:58.2968 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0e70a319-6dee-4e97-c4c9-08d9a9f18e08",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT017.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MWHPR12MB1885",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Elena Agostini <eagostini@nvidia.com>\n\nThis patch introduces ethdev in test-gpudev app to provide:\n- an example to show how GPU memory can be used to send and receive packets\n- an useful tool to measure network metrics when using GPU memory with\nio forwarding\n\nWith this feature test-gpudev can:\n- RX packets in CPU or GPU memory\n- Store packets in the gpudev communication list\n- TX receive packets from the communication list\n\nIt's a simulation of a multi-core application.\n\nSigned-off-by: Elena Agostini <eagostini@nvidia.com>\n---\n app/test-gpudev/main.c | 471 +++++++++++++++++++++++++++++++++++++++--\n 1 file changed, 452 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/app/test-gpudev/main.c b/app/test-gpudev/main.c\nindex 250fba6427..daa586c64e 100644\n--- a/app/test-gpudev/main.c\n+++ b/app/test-gpudev/main.c\n@@ -10,6 +10,8 @@\n #include <stdarg.h>\n #include <errno.h>\n #include <getopt.h>\n+#include <stdbool.h>\n+#include <signal.h>\n \n #include <rte_common.h>\n #include <rte_malloc.h>\n@@ -19,22 +21,98 @@\n #include <rte_ethdev.h>\n #include <rte_mempool.h>\n #include <rte_mbuf.h>\n+#include <rte_launch.h>\n+#include <rte_lcore.h>\n+#include <rte_per_lcore.h>\n \n #include <rte_gpudev.h>\n \n+#ifndef ACCESS_ONCE\n+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&x)\n+#endif\n+\n+#ifndef WRITE_ONCE\n+#define WRITE_ONCE(x, v) (ACCESS_ONCE(x) = (v))\n+#endif\n+\n+#define GPU_PAGE_SHIFT   16\n+#define GPU_PAGE_SIZE    (1UL << GPU_PAGE_SHIFT)\n+#define GPU_PAGE_OFFSET  (GPU_PAGE_SIZE-1)\n+#define GPU_PAGE_MASK    (~GPU_PAGE_OFFSET)\n+\n+#define MAX_QUEUES 16\n+#define NUM_COMM_ITEMS 2048\n+#define PKT_GAP 4\n+\n+// #define DEBUG_PRINT 1\n+\n enum app_args {\n \tARG_HELP,\n-\tARG_MEMPOOL\n+\tARG_BURST,\n+\tARG_GPU,\n+\tARG_MBUFD,\n+\tARG_MEMORY,\n+\tARG_QUEUES,\n+\tARG_TESTAPI,\n+};\n+\n+enum mem_type {\n+\tMEMORY_CPU,\n+\tMEMORY_GPU\n+};\n+\n+/* Options configurable from cmd line */\n+static uint32_t conf_burst = 64;\n+static uint16_t conf_gpu_id = 0;\n+static enum mem_type conf_mtype = MEMORY_CPU;\n+static uint32_t conf_mbuf_dataroom = 2048;\n+static uint32_t conf_queues = 1;\n+static bool conf_testapi = false;\n+static uint16_t conf_nb_descriptors = 2048;\n+\n+/* Options statically defined */\n+static uint32_t conf_nb_mbuf = 16384;\n+static uint16_t conf_port_id = 0;\n+\n+/* Other variables */\n+static volatile bool force_quit;\n+static struct rte_mempool *mpool;\n+static struct rte_pktmbuf_extmem ext_mem;\n+struct rte_gpu_comm_list *comm_list_fwd[MAX_QUEUES];\n+struct rte_ether_addr port_eth_addr;\n+static struct rte_eth_conf port_conf = {\n+\t.rxmode = {\n+\t\t\t\t.mq_mode = 
ETH_MQ_RX_RSS,\n+\t\t\t\t.split_hdr_size = 0,\n+\t\t\t\t.offloads = 0,\n+\t\t\t},\n+\t.txmode = {\n+\t\t\t.mq_mode = ETH_MQ_TX_NONE,\n+\t\t\t.offloads = 0,\n+\t\t\t},\n+\t.rx_adv_conf = {\n+\t\t\t.rss_conf = {\n+\t\t\t\t\t\t.rss_key = NULL,\n+\t\t\t\t\t\t.rss_hf = ETH_RSS_IP\n+\t\t\t\t\t},\n+\t\t\t},\n };\n \n static void\n usage(const char *prog_name)\n {\n-\tprintf(\"%s [EAL options] --\\n\",\n+\tprintf(\"%s [EAL options] --\\n\"\n+\t\t\" --help\\n\"\n+\t\t\" --burst N: number of packets per rx burst\\n\"\n+\t\t\" --gpu N: GPU ID to use\\n\"\n+\t\t\" --memory N: external mempool memory type, 0 CPU, 1 GPU\\n\"\n+\t\t\" --mbufd N: mbuf dataroom size\\n\"\n+\t\t\" --testapi: test gpudev function\\n\"\n+\t\t\" --queues N: number of RX queues\\n\",\n \t\tprog_name);\n }\n \n-static void\n+static int\n args_parse(int argc, char **argv)\n {\n \tchar **argvopt;\n@@ -42,7 +120,19 @@ args_parse(int argc, char **argv)\n \tint opt_idx;\n \n \tstatic struct option lgopts[] = {\n-\t\t{ \"help\", 0, 0, ARG_HELP},\n+\t\t{ \"help\",  0, 0, ARG_HELP},\n+\t\t/* Packets per burst. */\n+\t\t{ \"burst\",  1, 0, ARG_BURST},\n+\t\t/* GPU to use. */\n+\t\t{ \"gpu\",  1, 0, ARG_GPU},\n+\t\t/* Type of memory for the mempool. */\n+\t\t{ \"memory\",  1, 0, ARG_MEMORY},\n+\t\t/* Size of mbufs dataroom */\n+\t\t{ \"mbufd\", 1, 0, ARG_MBUFD},\n+\t\t/* Number of RX queues */\n+\t\t{ \"queues\", 1, 0, ARG_QUEUES},\n+\t\t/* Test only gpudev functions */\n+\t\t{ \"testapi\", 0, 0, ARG_TESTAPI},\n \t\t/* End of options */\n \t\t{ 0, 0, 0, 0 }\n \t};\n@@ -51,6 +141,24 @@ args_parse(int argc, char **argv)\n \twhile ((opt = getopt_long(argc, argvopt, \"\",\n \t\t\t\tlgopts, &opt_idx)) != EOF) {\n \t\tswitch (opt) {\n+\t\tcase ARG_BURST:\n+\t\t\tconf_burst = (uint32_t) atoi(optarg);\n+\t\t\tbreak;\n+\t\tcase ARG_GPU:\n+\t\t\tconf_gpu_id = (uint16_t) atoi(optarg);\n+\t\t\tbreak;\n+\t\tcase ARG_MEMORY:\n+\t\t\tconf_mtype = (atoi(optarg) == 1 ? 
MEMORY_GPU : MEMORY_CPU);\n+\t\t\tbreak;\n+\t\tcase ARG_MBUFD:\n+\t\t\tconf_mbuf_dataroom = (uint32_t) atoi(optarg);\n+\t\t\tbreak;\n+\t\tcase ARG_QUEUES:\n+\t\t\tconf_queues = (uint32_t) atoi(optarg);\n+\t\t\tbreak;\n+\t\tcase ARG_TESTAPI:\n+\t\t\tconf_testapi = (atoi(optarg) == 1 ? true : false);\n+\t\t\tbreak;\n \t\tcase ARG_HELP:\n \t\t\tusage(argv[0]);\n \t\t\tbreak;\n@@ -60,6 +168,20 @@ args_parse(int argc, char **argv)\n \t\t\tbreak;\n \t\t}\n \t}\n+\n+\tif (conf_queues > MAX_QUEUES) {\n+\t\tfprintf(stderr, \"Can't support more than %d queues\\n\", MAX_QUEUES);\n+\t\treturn -1;\n+\t}\n+\n+\tif (conf_queues * 2 > rte_lcore_count()) {\n+\t\tfprintf(stderr,\n+\t\t\t\t\"Need to use at least %d cores to support %d RX/TX queues (EAL cores %d)\\n\",\n+\t\t\t\tconf_queues * 2, conf_queues, rte_lcore_count());\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n }\n \n static int\n@@ -342,13 +464,118 @@ create_update_comm_list(uint16_t gpu_id)\n \treturn -1;\n }\n \n+static void\n+signal_handler(int signum)\n+{\n+\tif (signum == SIGINT || signum == SIGTERM) {\n+\t\tprintf(\"\\n\\nSignal %d received, preparing to exit...\\n\",\n+\t\t\t\tsignum);\n+\t\tforce_quit = true;\n+\t}\n+}\n+\n+static int\n+rx_core(__rte_unused void *arg)\n+{\n+\tuint32_t queue_id;\n+\tuint32_t nb_rx = 0;\n+\tint ret = 0;\n+\tint comm_list_item = 0;\n+\tstruct rte_mbuf *rx_mbufs[RTE_GPU_COMM_LIST_PKTS_MAX];\n+\n+\tqueue_id = (rte_lcore_index(rte_lcore_id()) - 1) / 2;\n+\n+\tprintf(\"RX core started on queue %d.\\n\", queue_id);\n+\n+\twhile (force_quit == false) {\n+\n+\t\tnb_rx = 0;\n+\t\twhile (nb_rx < RTE_GPU_COMM_LIST_PKTS_MAX &&\n+\t\t\t\tnb_rx < (conf_burst - PKT_GAP) &&\n+\t\t\t\tforce_quit == false) {\n+\t\t\tnb_rx += rte_eth_rx_burst(conf_port_id, queue_id,\n+\t\t\t\t\t\t\t\t\t\t&(rx_mbufs[nb_rx]),\n+\t\t\t\t\t\t\t\t\t\t(conf_burst - nb_rx));\n+\t\t}\n+\n+\t\tret = rte_gpu_comm_populate_list_pkts(\n+\t\t\t\t&(comm_list_fwd[queue_id][comm_list_item]), rx_mbufs, nb_rx);\n+\t\tif (ret) 
{\n+\t\t\tfprintf(stderr,\n+\t\t\t\t\t\"rte_gpu_comm_populate_list_pkts error %d.\\n\", ret);\n+\t\t\treturn -1;\n+\t\t}\n+\n+#ifdef DEBUG_PRINT\n+\t\tprintf(\"RX %d pkts from item %d\\n\",\n+\t\t\tcomm_list_fwd[queue_id][comm_list_item].num_pkts,\n+\t\t\tcomm_list_item);\n+#endif\n+\n+\t\tWRITE_ONCE(comm_list_fwd[queue_id][comm_list_item].status, RTE_GPU_COMM_LIST_DONE);\n+\n+\t\tcomm_list_item = (comm_list_item+1) % NUM_COMM_ITEMS;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+tx_core(__rte_unused void *arg)\n+{\n+\tuint32_t queue_id = 0;\n+\tuint32_t nb_tx = 0;\n+\tint ret = 0;\n+\tint comm_list_item = 0;\n+\n+\tqueue_id = (rte_lcore_index(rte_lcore_id()) - 1) / 2;\n+\tprintf(\"TX core started on queue %d.\\n\", queue_id);\n+\n+\twhile (force_quit == false) {\n+\n+#ifdef DEBUG_PRINT\n+\t\tprintf(\"Waiting on item %d\\n\", comm_list_item);\n+#endif\n+\t\twhile (ACCESS_ONCE(comm_list_fwd[queue_id][comm_list_item].status)\n+\t\t\t\t!= RTE_GPU_COMM_LIST_DONE && force_quit == false);\n+\n+\t\tnb_tx = 0;\n+\t\twhile (nb_tx < comm_list_fwd[queue_id][comm_list_item].num_pkts) {\n+\t\t\tnb_tx += rte_eth_tx_burst(conf_port_id, queue_id,\n+\t\t\t\t\t&(comm_list_fwd[queue_id][comm_list_item].mbufs[nb_tx]),\n+\t\t\t\t\tcomm_list_fwd[queue_id][comm_list_item].num_pkts - nb_tx);\n+\t\t}\n+\t\trte_wmb();\n+\n+#ifdef DEBUG_PRINT\n+\t\tprintf(\"TX %d/%d pkts from item %d\\n\",\n+\t\t\t\tnb_tx, comm_list_fwd[queue_id][comm_list_item].num_pkts,\n+\t\t\t\tcomm_list_item);\n+#endif\n+\t\tret = rte_gpu_comm_cleanup_list(&(comm_list_fwd[queue_id][comm_list_item]));\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"rte_gpu_comm_cleanup_list error %d.\\n\", ret);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\trte_mb();\n+\n+\t\tcomm_list_item = (comm_list_item+1) % NUM_COMM_ITEMS;\n+\t}\n+\n+\treturn 0;\n+}\n+\n int\n main(int argc, char **argv)\n {\n-\tint ret;\n+\tint ret, core_id;\n \tint nb_gpus = 0;\n+\tint nb_ports = 0;\n \tint16_t gpu_id = 0;\n+\tuint32_t idx_q = 0;\n \tstruct rte_gpu_info 
ginfo;\n+\tstruct rte_eth_dev_info dev_info;\n \n \t/* Init EAL. */\n \tret = rte_eal_init(argc, argv);\n@@ -356,8 +583,14 @@ main(int argc, char **argv)\n \t\trte_exit(EXIT_FAILURE, \"EAL init failed\\n\");\n \targc -= ret;\n \targv += ret;\n-\tif (argc > 1)\n-\t\targs_parse(argc, argv);\n+\tif (argc > 1) {\n+\t\tret = args_parse(argc, argv);\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"Input args error.\\n\");\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\n \targc -= ret;\n \targv += ret;\n \n@@ -381,25 +614,225 @@ main(int argc, char **argv)\n \n \tif (nb_gpus == 0) {\n \t\tfprintf(stderr, \"Need at least one GPU on the system to run the example\\n\");\n-\t\treturn EXIT_FAILURE;\n+\t\tgoto exit;\n \t}\n \n-\tgpu_id = 0;\n+\tif (nb_gpus < conf_gpu_id) {\n+\t\tfprintf(stderr, \"Not enough GPUs in the system (%d / %d).\\n\", nb_gpus, conf_gpu_id);\n+\t\tgoto exit;\n+\t}\n \n-\t/**\n-\t * Memory tests\n-\t */\n-\talloc_gpu_memory(gpu_id);\n-\tregister_cpu_memory(gpu_id);\n+\tif (conf_testapi == true) {\n+\t\t/* Memory tests */\n+\t\talloc_gpu_memory(gpu_id);\n+\t\tregister_cpu_memory(gpu_id);\n \n-\t/**\n-\t * Communication items test\n-\t */\n-\tcreate_update_comm_flag(gpu_id);\n-\tcreate_update_comm_list(gpu_id);\n+\t\t/* Communication items test */\n+\t\tcreate_update_comm_flag(gpu_id);\n+\t\tcreate_update_comm_list(gpu_id);\n+\n+\t\tgoto exit;\n+\t}\n+\n+\tforce_quit = false;\n+\tsignal(SIGINT, signal_handler);\n+\tsignal(SIGTERM, signal_handler);\n+\n+\tnb_ports = rte_eth_dev_count_avail();\n+\tif (nb_ports == 0)\n+\t\trte_exit(EXIT_FAILURE, \"No Ethernet ports - bye\\n\");\n+\n+\tret = rte_eth_dev_info_get(conf_port_id, &dev_info);\n+\tif (ret) {\n+\t\tfprintf(stderr, \"rte_eth_dev_info_get failed with %d.\\n\", ret);\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Create external memory mempool. 
*/\n+\text_mem.elt_size = conf_mbuf_dataroom + RTE_PKTMBUF_HEADROOM;\n+\text_mem.buf_len = RTE_ALIGN_CEIL(conf_nb_mbuf * ext_mem.elt_size, GPU_PAGE_SIZE);\n+\n+\tif (conf_mtype == MEMORY_CPU) {\n+\t\text_mem.buf_ptr = rte_malloc(\"extmem\", ext_mem.buf_len, 0);\n+\t\tif (ext_mem.buf_ptr == NULL) {\n+\t\t\tfprintf(stderr, \"Could not allocate CPU DPDK memory.\\n\");\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\tret = rte_gpu_mem_register(conf_gpu_id, ext_mem.buf_len, ext_mem.buf_ptr);\n+\t\tif (ret < 0) {\n+\t\t\tfprintf(stderr,\n+\t\t\t\t\t\"rte_gpu_mem_register CPU memory returned error %d.\\n\", ret);\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\text_mem.buf_iova = RTE_BAD_IOVA;\n+\n+\t\text_mem.buf_ptr = rte_gpu_mem_alloc(conf_gpu_id, ext_mem.buf_len);\n+\t\tif (ext_mem.buf_ptr == NULL) {\n+\t\t\tfprintf(stderr, \"Could not allocate GPU device memory.\\n\");\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\tret = rte_extmem_register(ext_mem.buf_ptr, ext_mem.buf_len,\n+\t\t\t\tNULL, ext_mem.buf_iova, GPU_PAGE_SIZE);\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"Unable to register addr 0x%p, ret %d.\\n\", ext_mem.buf_ptr, ret);\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\n+\t/* DMA map the external memory. */\n+\tret = rte_dev_dma_map(dev_info.device, ext_mem.buf_ptr,\n+\t\t\text_mem.buf_iova, ext_mem.buf_len);\n+\tif (ret) {\n+\t\tfprintf(stderr, \"Could not DMA map EXT memory.\\n\");\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Create external memory mempool. */\n+\tmpool = rte_pktmbuf_pool_create_extbuf(\"payload_mpool\", conf_nb_mbuf,\n+\t\t\t0, 0, ext_mem.elt_size,\n+\t\t\trte_socket_id(), &ext_mem, 1);\n+\tif (mpool == NULL) {\n+\t\tfprintf(stderr, \"Could not create EXT memory mempool.\\n\");\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Queues configuration. 
*/\n+\tret = rte_eth_dev_configure(conf_port_id, conf_queues,\n+\t\t\tconf_queues, &port_conf);\n+\tif (ret < 0) {\n+\t\tfprintf(stderr,\n+\t\t\t\t\"Cannot configure device: err=%d, port=%u queues=%u\\n\",\n+\t\t\t\tret, conf_port_id, conf_queues);\n+\t\tgoto exit;\n+\t}\n+\n+\tret = rte_eth_dev_adjust_nb_rx_tx_desc(conf_port_id,\n+\t\t\t&conf_nb_descriptors, &conf_nb_descriptors);\n+\tif (ret) {\n+\t\tfprintf(stderr,\n+\t\t\t\t\"Cannot adjust number of descriptors: err=%d, port=%u\\n\",\n+\t\t\t\tret, conf_port_id);\n+\t\tgoto exit;\n+\t}\n+\n+\tfor (idx_q = 0; idx_q < conf_queues; idx_q++) {\n+\n+\t\tret = rte_eth_rx_queue_setup(conf_port_id, idx_q,\n+\t\t\t\tconf_nb_descriptors, rte_lcore_to_socket_id(idx_q),\n+\t\t\t\tNULL, mpool);\n+\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"rte_eth_rx_queue_setup: err=%d, port=%u\\n\",\n+\t\t\t\t\tret, conf_port_id);\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\tret = rte_eth_tx_queue_setup(conf_port_id, idx_q,\n+\t\t\t\tconf_nb_descriptors, rte_lcore_to_socket_id(idx_q), NULL);\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"rte_eth_tx_queue_setup: err=%d, port=%u\\n\",\n+\t\t\t\t\tret, conf_port_id);\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\n+\trte_eth_macaddr_get(conf_port_id, &port_eth_addr);\n+\n+\tret = rte_eth_dev_start(conf_port_id);\n+\tif (ret) {\n+\t\tfprintf(stderr, \"rte_eth_dev_start: err=%d, port=%u\\n\",\n+\t\t\t\tret, conf_port_id);\n+\t\t\tgoto exit;\n+\t}\n+\n+\tprintf(\"Port %d: %02x:%02x:%02x:%02x:%02x:%02x started!\\n\",\n+\t\t\t\tconf_port_id,\n+\t\t\t\t(uint8_t)port_eth_addr.addr_bytes[0],\n+\t\t\t\t(uint8_t)port_eth_addr.addr_bytes[1],\n+\t\t\t\tport_eth_addr.addr_bytes[2],\n+\t\t\t\tport_eth_addr.addr_bytes[3],\n+\t\t\t\tport_eth_addr.addr_bytes[4],\n+\t\t\t\tport_eth_addr.addr_bytes[5]);\n+\n+\trte_eth_promiscuous_enable(conf_port_id);\n+\n+\t/* Create communication lists, one per queue. 
*/\n+\tfor (idx_q = 0; idx_q < MAX_QUEUES; idx_q++) {\n+\t\tcomm_list_fwd[idx_q] = NULL;\n+\n+\t\tif (idx_q < conf_queues) {\n+\t\t\tcomm_list_fwd[idx_q] = rte_gpu_comm_create_list(conf_gpu_id, NUM_COMM_ITEMS);\n+\t\t\tif (comm_list_fwd[idx_q] == NULL) {\n+\t\t\t\tfprintf(stderr, \"rte_gpu_comm_create_list returned error %d\\n\", ret);\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t\tret = rte_gpu_comm_cleanup_list(&(comm_list_fwd[idx_q][0]));\n+\t\t\tif (ret < 0) {\n+\t\t\t\tfprintf(stderr, \"rte_gpu_comm_cleanup_list returned error %d\\n\", ret);\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tcore_id = 0;\n+\tfor (idx_q = 0; idx_q < conf_queues; idx_q++) {\n+\t\tcore_id = rte_get_next_lcore(core_id, 1, 0);\n+\t\trte_eal_remote_launch(tx_core, NULL, core_id);\n+\n+\t\tcore_id = rte_get_next_lcore(core_id, 1, 0);\n+\t\trte_eal_remote_launch(rx_core, NULL, core_id);\n+\t}\n+\n+\tcore_id = 0;\n+\tRTE_LCORE_FOREACH_WORKER(core_id) {\n+\t\tif (rte_eal_wait_lcore(core_id) < 0) {\n+\t\t\tfprintf(stderr, \"bad exit for core %d.\\n\",\n+\t\t\t\t\tcore_id);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tforce_quit = true;\n+\n+\tret = rte_dev_dma_unmap(dev_info.device, (void *)ext_mem.buf_ptr,\n+\t\t\tRTE_BAD_IOVA, ext_mem.buf_len);\n+\tif (ret) {\n+\t\tfprintf(stderr,\n+\t\t\t\t\"rte_dev_dma_unmap 0x%p -> %d (rte_errno = %d)\\n\",\n+\t\t\t\t(uint8_t *)ext_mem.buf_ptr, ret, rte_errno);\n+\t\tgoto exit;\n+\t}\n+\n+\tif (conf_mtype == MEMORY_CPU) {\n+\t\tret = rte_gpu_mem_unregister(conf_gpu_id, ext_mem.buf_ptr);\n+\t\tif (ret < 0) {\n+\t\t\tfprintf(stderr, \"rte_gpu_mem_unregister returned error %d\\n\", ret);\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\trte_free(ext_mem.buf_ptr);\n+\n+\t} else {\n+\n+\t\tret = rte_extmem_unregister(ext_mem.buf_ptr, ext_mem.buf_len);\n+\t\tif (ret) {\n+\t\t\tfprintf(stderr, \"rte_extmem_unregister failed with %d.\\n\", ret);\n+\t\t\tgoto exit;\n+\t\t}\n+\n+\t\trte_gpu_mem_free(conf_gpu_id, (void 
*)ext_mem.buf_ptr);\n+\t}\n+\n+\trte_eth_dev_stop(conf_port_id);\n+\trte_eth_dev_close(conf_port_id);\n \n+exit:\n \t/* clean up the EAL */\n \trte_eal_cleanup();\n \n+\tprintf(\"Bye...\\n\");\n \treturn EXIT_SUCCESS;\n }\n",
    "prefixes": [
        "v1",
        "1/1"
    ]
}