get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
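
A minimal sketch of exercising these methods from a client, assuming Python with
the third-party requests package and a hypothetical API token (write access via
PUT/PATCH normally requires a token for a project maintainer; field names follow
the JSON shown below):

  import requests

  BASE = "https://patchwork.dpdk.org/api"
  TOKEN = "0123456789abcdef"  # hypothetical; generate a real token in the web UI

  # GET: show a patch (no authentication required for reads).
  patch = requests.get(f"{BASE}/patches/99443/").json()
  print(patch["name"], patch["state"])

  # PATCH: partially update the same patch, e.g. archive it.
  resp = requests.patch(
      f"{BASE}/patches/99443/",
      headers={"Authorization": f"Token {TOKEN}"},
      json={"archived": True},
  )
  resp.raise_for_status()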

GET /api/patches/99443/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99443,
    "url": "https://patchwork.dpdk.org/api/patches/99443/?format=api",
    "web_url": "https://patchwork.dpdk.org/project/dpdk/patch/20210922180418.20663-1-viacheslavo@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210922180418.20663-1-viacheslavo@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210922180418.20663-1-viacheslavo@nvidia.com",
    "date": "2021-09-22T18:04:15",
    "name": "[0/3] ethdev: introduce configurable flexible item",
    "commit_ref": null,
    "pull_url": null,
    "state": null,
    "archived": false,
    "hash": null,
    "submitter": {
        "id": 1926,
        "url": "https://patchwork.dpdk.org/api/people/1926/?format=api",
        "name": "Slava Ovsiienko",
        "email": "viacheslavo@nvidia.com"
    },
    "delegate": null,
    "mbox": "https://patchwork.dpdk.org/project/dpdk/patch/20210922180418.20663-1-viacheslavo@nvidia.com/mbox/",
    "series": [],
    "comments": "https://patchwork.dpdk.org/api/patches/99443/comments/",
    "check": "pending",
    "checks": "https://patchwork.dpdk.org/api/patches/99443/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id C653CA0C45;\n\tWed, 22 Sep 2021 20:04:49 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6297F411EC;\n\tWed, 22 Sep 2021 20:04:49 +0200 (CEST)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2041.outbound.protection.outlook.com [40.107.237.41])\n by mails.dpdk.org (Postfix) with ESMTP id 50C6B411A8\n for <dev@dpdk.org>; Wed, 22 Sep 2021 20:04:48 +0200 (CEST)",
            "from DM6PR03CA0033.namprd03.prod.outlook.com (2603:10b6:5:40::46) by\n MN2PR12MB4550.namprd12.prod.outlook.com (2603:10b6:208:24e::12) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4523.14; Wed, 22 Sep\n 2021 18:04:46 +0000",
            "from DM6NAM11FT045.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:40:cafe::ab) by DM6PR03CA0033.outlook.office365.com\n (2603:10b6:5:40::46) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.13 via Frontend\n Transport; Wed, 22 Sep 2021 18:04:46 +0000",
            "from mail.nvidia.com (216.228.112.35) by\n DM6NAM11FT045.mail.protection.outlook.com (10.13.173.123) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4544.13 via Frontend Transport; Wed, 22 Sep 2021 18:04:46 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL111.nvidia.com\n (172.20.187.18) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 22 Sep\n 2021 18:04:41 +0000",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 22 Sep\n 2021 18:04:39 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Dxhba18QZE0QCg0UUUkSRM8+rjuHwjRYGUjAqjXO4g7PZv3e/JEE0TW4Rk6+J0jdHSWn7BIiX30JRpotPD7wpy2vqnAfDqbAZK0dr01tMLptMiUb00TCv0BEBqqO6sy/IG4EQbVPJNHmNDDSPKiuJj6hHBuDPcccBeGgdAiakAmRRqITC+zrlAUzA9O/pOxpTVyDOI7kUIks8Ge+A8//SzjQy7vZsZ+NWqh60MT/94A6Bl0fibKE8PFliAGWRZI627/1X2ydvBFvUOyLK6Iwq49VkYPqwp6tebNUQEyOOrzz7LP+vvndLCjgMIAIdJDlUscQOOGQt4jYaBQ7pY8Jcw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=db8Z9Av/Ct3V4rqnIBmr63y4HfOfTwmDvXLio31IPJc=;\n b=kS4lolwOmqD5xehQBJi6Zjcc8+yHUr5x8QuXIEOqgK5BbJ6wM00rIYYg5T79OxFZMNk0NFNkKoQIYvhm7sQLgFyXroZSlhF5jtDg7Nl0U81f3z6cALaDnkj2cggOf85VoNO3NWXCn9MpBxxH5tL9MgdCzrCsjDChkFgU1Q7g4GWP3pQSoKMQ0pqo8NRcX4sLp5z/jzofcQgaeBgTwM9qf7/sC8UcjJra5Ycrdxy6B1KQpJZxXcocY3RPd0YfPWJpuedMDoEU72ruFXValD8RlzZhDezKfsxp2OlCUaWcvYg/LwFEgW0SkLEP1TkXySUFeFJkXVMUn9Z1HgX28b9IsQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.35) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=db8Z9Av/Ct3V4rqnIBmr63y4HfOfTwmDvXLio31IPJc=;\n b=Dt0pqbFTWPeV6hc1EbMdN0Ltnmlk+SPolspGOzK2QpznqG1VLd4FtOC+uJ/T253oY8+1dju7ZwE1Z21D8FHePI5whj1kAC4nnnURXzr5DGbqkwdWN5uYA6AlvRDm/EfBbBkdJPuyWso9zwggSdFZgsXxHiwkHzzdGoc8KhhDzKSVHU+7qCN3KTuoqT7JtPw1/YsXd4MNQG3sR3JWp6utZHJrbAp5UUmrZdRxieEIS7/sEZT9Ngrw+Vbg5EIv7WFHJQo/NSNTy4Q1pGtULYSc5SqHnbAyrUEL6qwjvOGLkePUvsHLyIwwpTx6VJnDOdhxaOc9JMkVaKl9XYLRoUHyQg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.35)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.35 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.35; helo=mail.nvidia.com;",
        "From": "Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<rasland@nvidia.com>, <matan@nvidia.com>, <shahafs@nvidia.com>,\n <orika@nvidia.com>, <getelson@nvidia.com>, <thomas@monjalon.net>",
        "Date": "Wed, 22 Sep 2021 21:04:15 +0300",
        "Message-ID": "<20210922180418.20663-1-viacheslavo@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fdf1dcf4-3b87-4194-5708-08d97df37618",
        "X-MS-TrafficTypeDiagnostic": "MN2PR12MB4550:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <MN2PR12MB4550011AC56635772C6735AFDFA29@MN2PR12MB4550.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:10000;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n zMkbj6Ur9gv2zhuy9nDGZw/K6qIegbyqC7VIlYyu2DYFWfWEL6Mj02xSzMmD+fYdr1bP0GO/tanY/GIMOjUUdxCVQaSQ19Edvsr0AlQMhOnm987B8C0VrE14vMrEYL3B3ivDIw2n89HmczuZ5QJ9uzMVtwU0Z459Cno6HWOidf1K4hw6xCIyZvNdyPQ0p7rLVDurPwzzLTPYsz3y1dnQWMSHGgS2eQHkn257BrXc+Vmd9rMoH2mDRvL0eN7BNC7ZScMlzGDAesHMTuQue+2/X+XEDvGHXyP5H4gSxdMFYmpKskyPknQuaqDHKSxJNjWYxEhqqLMsDMN/IP/MKZZgP39x7m+QDgLpuT1ZjZXMCijHPfkzmwbr83Q1HkSxS2fSyO7t9WY1wfL8luPksHBKKrUnNtbtfakwbZIbOAc8ZdPmMuEitGCqlVHEaP0b6Ics2PJxCAFGzLY5eCZCXS29xuKvlI3S5/tgneH/Uwzfnm7fKAePXveeA4QdRRKbzEO3UQQLpV+XciVjYGEQWF/cH4Wsfq1vvP8dBJwjxE+UVKNS5p6y9IZkyoV3OHN9gOUpQWbZRotKnAMVgLwVlTVcsAOGxIp7CbhzVr/3dhra0AQJ/tj5vlZ0Uoep/HZgciBMk1lmSkrPK1nwLSKpqBsRm3S9S+2eEepBbi7p6TpnR5K8udkCKSeraVsIazp3k+EaMnxEWA775TxOVbtakCOGmOLVoSq7YR2qbh1MZ8xbMa+wIVQaqg/vPSCewI+G6xhWpVtpXffoD2MLXppxJe6/oQaFmcRSTuHKyxMNF35Wics=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.35; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid02.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(47076005)(30864003)(82310400003)(1076003)(356005)(508600001)(83380400001)(16526019)(6666004)(26005)(2616005)(86362001)(2906002)(6916009)(336012)(966005)(316002)(36906005)(426003)(186003)(4326008)(6286002)(70586007)(54906003)(55016002)(5660300002)(7636003)(70206006)(7696005)(8936002)(36860700001)(8676002)(36756003);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "22 Sep 2021 18:04:46.0038 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fdf1dcf4-3b87-4194-5708-08d97df37618",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.35];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT045.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB4550",
        "Subject": "[dpdk-dev] [PATCH 0/3] ethdev: introduce configurable flexible item",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "1. Introduction and Retrospective\n\nNowadays the networks are evolving fast and wide, the network\nstructures are getting more and more complicated, the new\napplication areas are emerging. To address these challenges\nthe new network protocols are continuously being developed,\nconsidered by technical communities, adopted by industry and,\neventually implemented in hardware and software. The DPDK\nframework follows the common trends and if we bother\nto glance at the RTE Flow API header we see the multiple\nnew items were introduced during the last years since\nthe initial release.\n\nThe new protocol adoption and implementation process is\nnot straightforward and takes time, the new protocol passes\ndevelopment, consideration, adoption, and implementation\nphases. The industry tries to mitigate and address the\nforthcoming network protocols, for example, many hardware\nvendors are implementing flexible and configurable network\nprotocol parsers. As DPDK developers, could we anticipate\nthe near future in the same fashion and introduce the similar\nflexibility in RTE Flow API?\n\nLet's check what we already have merged in our project, and\nwe see the nice raw item (rte_flow_item_raw). At the first\nglance, it looks superior and we can try to implement a flow\nmatching on the header of some relatively new tunnel protocol,\nsay on the GENEVE header with variable length options. And,\nunder further consideration, we run into the raw item\nlimitations:\n\n- only fixed size network header can be represented\n- the entire network header pattern of fixed format\n  (header field offsets are fixed) must be provided\n- the search for patterns is not robust (the wrong matches\n  might be triggered), and actually is not supported\n  by existing PMDs\n- no explicitly specified relations with preceding\n  and following items\n- no tunnel hint support\n\nAs the result, implementing the support for tunnel protocols\nlike aforementioned GENEVE with variable extra protocol option\nwith flow raw item becomes very complicated and would require\nmultiple flows and multiple raw items chained in the same\nflow (by the way, there is no support found for chained raw\nitems in implemented drivers).\n\nThis RFC introduces the dedicated flex item (rte_flow_item_flex)\nto handle matches with existing and new network protocol headers\nin a unified fashion.\n\n2. Flex Item Life Cycle\n\nLet's assume there are the requirements to support the new\nnetwork protocol with RTE Flows. What is given within protocol\nspecification:\n\n  - header format\n  - header length, (can be variable, depending on options)\n  - potential presence of extra options following or included\n    in the header the header\n  - the relations with preceding protocols. For example,\n    the GENEVE follows UDP, eCPRI can follow either UDP\n    or L2 header\n  - the relations with following protocols. For example,\n    the next layer after tunnel header can be L2 or L3\n  - whether the new protocol is a tunnel and the header\n    is a splitting point between outer and inner layers\n\nThe supposed way to operate with flex item:\n\n  - application defines the header structures according to\n    protocol specification\n\n  - application calls rte_flow_flex_item_create() with desired\n    configuration according to the protocol specification, it\n    creates the flex item object over specified ethernet device\n    and prepares PMD and underlying hardware to handle flex\n    item. 
On the item creation call, the PMD backing the specified\n    ethernet device returns the opaque handle identifying\n    the object that has been created\n\n  - application uses the rte_flow_item_flex with obtained handle\n    in the flows, the values/masks to match with fields in the\n    header are specified in the flex item per flow as for regular\n    items (except that pattern buffer combines all fields)\n\n  - flows with flex items match with packets in a regular fashion,\n    the values and masks for the new protocol header match are\n    taken from the flex items in the flows\n\n  - application destroys flows with flex items\n\n  - application calls rte_flow_flex_item_release() as part of\n    ethernet device API and destroys the flex item object in\n    PMD and releases the engaged hardware resources\n\n3. Flex Item Structure\n\nThe flex item structure is intended to be used as part of the flow\npattern like regular RTE flow items and provides the mask and\nvalue to match with fields of the protocol the item was configured\nfor.\n\n  struct rte_flow_item_flex {\n    void *handle;\n    uint32_t length;\n    const uint8_t* pattern;\n  };\n\nThe handle is an opaque object maintained on a per-device basis\nby the underlying driver.\n\nThe protocol header fields are considered as bit fields, all\noffsets and widths are expressed in bits. The pattern is the\nbuffer containing the bit concatenation of all the fields\npresented at item configuration time, in the same order and\nsame amount. If byte boundary alignment is needed an application\ncan use a dummy type field; this is just some kind of gap filler.\n\nThe length field specifies the pattern buffer length in bytes\nand is needed to allow rte_flow_copy() operations. The approach\nof multiple pattern pointers and lengths (per field) was\nconsidered and found clumsy - it seems to be much more suitable for\nthe application to maintain the single structure within the\nsingle pattern buffer.\n\n4. Flex Item Configuration\n\nThe flex item configuration consists of the following parts:\n\n  - header field descriptors:\n    - next header\n    - next protocol\n    - sample to match\n  - input link descriptors\n  - output link descriptors\n\nThe field descriptors tell driver and hardware what data should\nbe extracted from the packet and then presented to match in the\nflows. Each field is a bit pattern. It has width, offset from\nthe header beginning, mode of offset calculation, and offset\nrelated parameters.\n\nThe next header field is special, no data are actually taken\nfrom the packet, but its offset is used as pointer to the next\nheader in the packet, in other words the next header offset\nspecifies the size of the header being parsed by flex item.\n\nThere is one more special field - next protocol, it specifies\nwhere the next protocol identifier is contained and packet data\nsampled from this field will be used to determine the next\nprotocol header type to continue packet parsing. The next\nprotocol field is like eth_type field in MAC2, or proto field\nin IPv4/v6 headers.\n\nThe sample fields are used to represent the data to be sampled\nfrom the packet and then matched with established flows.\n\nThere are several methods supposed to calculate field offset\nat runtime depending on configuration and packet content:\n\n  - FIELD_MODE_FIXED - fixed offset. 
The bit offset from\n    header beginning is permanent and defined by field_base\n    configuration parameter.\n\n  - FIELD_MODE_OFFSET - the field bit offset is extracted\n    from other header field (indirect offset field). The\n    resulting field offset to match is calculated as:\n\n  field_base + (*field_offset & offset_mask) << field_shift\n\n    This mode is useful to sample some extra options following\n    the main header with field containing main header length.\n    Also, this mode can be used to calculate offset to the\n    next protocol header, for example - IPv4 header contains\n    the 4-bit field with IPv4 header length expressed in dwords.\n    One more example - this mode would allow us to skip GENEVE\n    header variable length options.\n\n  - FIELD_MODE_BITMASK - the field bit offset is extracted\n    from other header field (indirect offset field), the latter\n    is considered as bitmask containing some number of one bits,\n    the resulting field offset to match is calculated as:\n\n  field_base + bitcount(*field_offset & offset_mask) << field_shift\n\n    This mode would be useful to skip the GTP header and its\n    extra options with specified flags.\n\n  - FIELD_MODE_DUMMY - dummy field, optionally used for byte\n    boundary alignment in pattern. Pattern mask and data are\n    ignored in the match. All configuration parameters besides\n    field size and offset are ignored.\n\nThe offset mode list can be extended by vendors according to\nhardware-supported options.\n\nThe input link configuration section tells the driver after\nwhat protocols and under what conditions the flex item can follow.\nThe input link specifies the preceding header pattern, for example\nfor GENEVE it can be a UDP item specifying a match on destination\nport with value 6081. If the flex item can follow multiple header\ntypes, multiple input links should be specified. At flow\ncreation time the item with one of the input link types should\nprecede the flex item and the driver will select the correct flex\nitem settings, depending on the actual flow pattern.\n\nThe output link configuration section tells the driver how\nto continue packet parsing after the flex item protocol.\nIf multiple protocols can follow the flex item header the\nflex item should contain the field with next protocol\nidentifier, and the parsing will be continued depending\non the data contained in this field in the actual packet.\n\nThe flex item fields can participate in RSS hash calculation,\nthe dedicated flag is present in field description to specify\nwhat fields should be provided for hashing.\n\n5. Flex Item Chaining\n\nIf there are multiple protocols supposed to be supported with\nflex items in chained fashion - two or more flex items within\nthe same flow and these ones might be neighbors in pattern - it\nmeans the flex items are mutually referencing.  In this case,\nthe item that occurred first should be created with an empty\noutput link list or with the list including existing items,\nand then the second flex item should be created referencing\nthe first flex item as input arc.\n\nAlso, the hardware resources used by flex items to handle\nthe packet can be limited. If there are multiple flex items\nthat are supposed to be used within the same flow it would\nbe nice to provide some hint for the driver that these two\nor more flex items are intended for simultaneous usage.\nThe fields of items should be assigned with hint indices\nand these indices from two or more flex items should not\noverlap (be unique per field). 
For this case, the driver\nwill try to engage non-overlapping hardware resources\nand provide independent handling of the fields with unique\nindices. If the hint index is zero the driver assigns\nresources on its own.\n\n6. Example of New Protocol Handling\n\nLet's suppose we have the requirements to handle the new tunnel\nprotocol that follows the UDP header with destination port 0xFADE\nand is followed by a MAC header. Let the new protocol header format\nbe like this:\n\n  struct new_protocol_header {\n    rte_be32 header_length; /* header length in dwords, including options */\n    rte_be32 specific0;     /* some protocol data, no intention */\n    rte_be32 specific1;     /* to match in flows on these fields */\n    rte_be32 crucial;       /* data of interest, match is needed */\n    rte_be32 options[0];    /* optional protocol data, variable length */\n  };\n\nThe supposed flex item configuration:\n\n  struct rte_flow_item_flex_field field0 = {\n    .field_mode = FIELD_MODE_DUMMY,  /* Affects match pattern only */\n    .field_size = 96,                /* Skip three dwords from the beginning */\n  };\n  struct rte_flow_item_flex_field field1 = {\n    .field_mode = FIELD_MODE_FIXED,\n    .field_size = 32,       /* Field size is one dword */\n    .field_base = 96,       /* Skip three dwords from the beginning */\n  };\n  struct rte_flow_item_udp spec0 = {\n    .hdr = {\n      .dst_port = RTE_BE16(0xFADE),\n    }\n  };\n  struct rte_flow_item_udp mask0 = {\n    .hdr = {\n      .dst_port = RTE_BE16(0xFFFF),\n    }\n  };\n  struct rte_flow_item_flex_link link0 = {\n    .item = {\n       .type = RTE_FLOW_ITEM_TYPE_UDP,\n       .spec = &spec0,\n       .mask = &mask0,\n    },\n  };\n\n  struct rte_flow_item_flex_conf conf = {\n    .next_header = {\n      .field_mode = FIELD_MODE_OFFSET,\n      .field_base = 0,\n      .offset_base = 0,\n      .offset_mask = 0xFFFFFFFF,\n      .offset_shift = 2    /* Expressed in dwords, shift left by 2 */\n    },\n    .sample = {\n       &field0,\n       &field1,\n    },\n    .sample_num = 2,\n    .input_link[0] = &link0,\n    .input_num = 1\n  };\n\nLet's suppose we have created the flex item successfully, and the PMD\nreturned the handle 0x123456789A. We can use the following item\npattern to match the crucial field in the packet with value 0x00112233:\n\n  struct new_protocol_header spec_pattern =\n  {\n    .crucial = RTE_BE32(0x00112233),\n  };\n  struct new_protocol_header mask_pattern =\n  {\n    .crucial = RTE_BE32(0xFFFFFFFF),\n  };\n  struct rte_flow_item_flex spec_flex = {\n    .handle = (void *)0x123456789A,\n    .length = sizeof(struct new_protocol_header),\n    .pattern = (const uint8_t *)&spec_pattern,\n  };\n  struct rte_flow_item_flex mask_flex = {\n    .length = sizeof(struct new_protocol_header),\n    .pattern = (const uint8_t *)&mask_pattern,\n  };\n  struct rte_flow_item item_to_match = {\n    .type = RTE_FLOW_ITEM_TYPE_FLEX,\n    .spec = &spec_flex,\n    .mask = &mask_flex,\n  };\n\n7. 
Notes:\n - testpmd and mlx5 PMD parts are coming soon\n - RFC: http://patches.dpdk.org/project/dpdk/patch/20210806085624.16497-1-viacheslavo@nvidia.com/\n\nGregory Etelson (2):\n  ethdev: support flow elements with variable length\n  ethdev: implement RTE flex item API\n\nViacheslav Ovsiienko (1):\n  ethdev: introduce configurable flexible item\n\n doc/guides/prog_guide/rte_flow.rst     |  24 +++\n doc/guides/rel_notes/release_21_11.rst |   7 +\n lib/ethdev/rte_ethdev.h                |   1 +\n lib/ethdev/rte_flow.c                  | 141 +++++++++++++--\n lib/ethdev/rte_flow.h                  | 228 +++++++++++++++++++++++++\n lib/ethdev/rte_flow_driver.h           |  13 ++\n lib/ethdev/version.map                 |   5 +\n 7 files changed, 406 insertions(+), 13 deletions(-)",
    "diff": null,
    "prefixes": [
        "0/3"
    ]
}
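
The fields above can also be consumed programmatically; the "mbox" and "checks"
values are themselves fetchable URLs. A minimal sketch in Python, assuming the
third-party requests package and assuming the checks endpoint returns a JSON
list whose entries carry "context" and "state" keys (only the summary
"check": "pending" field is visible above):

  import requests

  # Fetch the patch shown above as plain JSON.
  patch = requests.get("https://patchwork.dpdk.org/api/patches/99443/").json()

  # Save the raw mbox so the patch can be applied locally with `git am`.
  with open("99443.mbox", "w") as f:
      f.write(requests.get(patch["mbox"]).text)

  # List the CI check results linked from the patch.
  for check in requests.get(patch["checks"]).json():
      print(check.get("context"), check.get("state"))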