get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields must be supplied.
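
For example, the GET endpoint can be exercised from any HTTP client. A minimal sketch in Python (assuming the third-party requests library is installed; reads against patchwork.dpdk.org typically need no authentication, and DRF content negotiation returns JSON by default):

    import requests

    # Fetch the patch shown below as JSON and print two of its fields.
    resp = requests.get("http://patchwork.dpdk.org/api/patches/96863/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], "-", patch["state"])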

GET /api/patches/96863/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96863,
    "url": "http://patchwork.dpdk.org/api/patches/96863/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210812135425.698189-6-radu.nicolau@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210812135425.698189-6-radu.nicolau@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210812135425.698189-6-radu.nicolau@intel.com",
    "date": "2021-08-12T13:54:20",
    "name": "[v2,05/10] ipsec: add support for AEAD algorithms",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "61f97c4cc5814242631f2ba00e93ca2b984d1852",
    "submitter": {
        "id": 743,
        "url": "http://patchwork.dpdk.org/api/people/743/?format=api",
        "name": "Radu Nicolau",
        "email": "radu.nicolau@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patchwork.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210812135425.698189-6-radu.nicolau@intel.com/mbox/",
    "series": [
        {
            "id": 18265,
            "url": "http://patchwork.dpdk.org/api/series/18265/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=18265",
            "date": "2021-08-12T13:54:15",
            "name": "new features for ipsec and security libraries",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/18265/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/96863/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/96863/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3A730A0C4E;\n\tThu, 12 Aug 2021 16:10:48 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5644C41215;\n\tThu, 12 Aug 2021 16:10:29 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 9436241125\n for <dev@dpdk.org>; Thu, 12 Aug 2021 16:10:26 +0200 (CEST)",
            "from fmsmga007.fm.intel.com ([10.253.24.52])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 12 Aug 2021 07:10:20 -0700",
            "from silpixa00400884.ir.intel.com ([10.243.22.82])\n by fmsmga007.fm.intel.com with ESMTP; 12 Aug 2021 07:10:16 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10074\"; a=\"300944403\"",
            "E=Sophos;i=\"5.84,316,1620716400\"; d=\"scan'208\";a=\"300944403\"",
            "E=Sophos;i=\"5.84,316,1620716400\"; d=\"scan'208\";a=\"446554400\""
        ],
        "X-ExtLoop1": "1",
        "From": "Radu Nicolau <radu.nicolau@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, mdr@ashroe.eu, konstantin.ananyev@intel.com,\n vladimir.medvedkin@intel.com, bruce.richardson@intel.com,\n hemant.agrawal@nxp.com, gakhil@marvell.com, anoobj@marvell.com,\n declan.doherty@intel.com, abhijit.sinha@intel.com,\n daniel.m.buckley@intel.com, marchana@marvell.com, ktejasree@marvell.com,\n matan@nvidia.com, Radu Nicolau <radu.nicolau@intel.com>,\n Abhijit Sinha <abhijits.sinha@intel.com>",
        "Date": "Thu, 12 Aug 2021 14:54:20 +0100",
        "Message-Id": "<20210812135425.698189-6-radu.nicolau@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210812135425.698189-1-radu.nicolau@intel.com>",
        "References": "<20210713133542.3550525-1-radu.nicolau@intel.com>\n <20210812135425.698189-1-radu.nicolau@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v2 05/10] ipsec: add support for AEAD algorithms",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for AES_CCM, CHACHA20_POLY1305 and AES_GMAC.\n\nSigned-off-by: Declan Doherty <declan.doherty@intel.com>\nSigned-off-by: Radu Nicolau <radu.nicolau@intel.com>\nSigned-off-by: Abhijit Sinha <abhijits.sinha@intel.com>\nSigned-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>\n---\n lib/ipsec/crypto.h   | 137 +++++++++++++++++++++++++++++++++++++++++++\n lib/ipsec/esp_inb.c  |  66 ++++++++++++++++++++-\n lib/ipsec/esp_outb.c |  70 +++++++++++++++++++++-\n lib/ipsec/sa.c       |  54 +++++++++++++++--\n lib/ipsec/sa.h       |   6 ++\n 5 files changed, 322 insertions(+), 11 deletions(-)",
    "diff": "diff --git a/lib/ipsec/crypto.h b/lib/ipsec/crypto.h\nindex 3d03034590..598ee9cebd 100644\n--- a/lib/ipsec/crypto.h\n+++ b/lib/ipsec/crypto.h\n@@ -21,6 +21,37 @@ struct aesctr_cnt_blk {\n \tuint32_t cnt;\n } __rte_packed;\n \n+ /*\n+  * CHACHA20-POLY1305 devices have some specific requirements\n+  * for IV and AAD formats.\n+  * Ideally that to be done by the driver itself.\n+  */\n+\n+struct aead_chacha20_poly1305_iv {\n+\tuint32_t salt;\n+\tuint64_t iv;\n+\tuint32_t cnt;\n+} __rte_packed;\n+\n+struct aead_chacha20_poly1305_aad {\n+\tuint32_t spi;\n+\t/*\n+\t * RFC 4106, section 5:\n+\t * Two formats of the AAD are defined:\n+\t * one for 32-bit sequence numbers, and one for 64-bit ESN.\n+\t */\n+\tunion {\n+\t\tuint32_t u32[2];\n+\t\tuint64_t u64;\n+\t} sqn;\n+\tuint32_t align0; /* align to 16B boundary */\n+} __rte_packed;\n+\n+struct chacha20_poly1305_esph_iv {\n+\tstruct rte_esp_hdr esph;\n+\tuint64_t iv;\n+} __rte_packed;\n+\n  /*\n   * AES-GCM devices have some specific requirements for IV and AAD formats.\n   * Ideally that to be done by the driver itself.\n@@ -51,6 +82,47 @@ struct gcm_esph_iv {\n \tuint64_t iv;\n } __rte_packed;\n \n+ /*\n+  * AES-CCM devices have some specific requirements for IV and AAD formats.\n+  * Ideally that to be done by the driver itself.\n+  */\n+union aead_ccm_salt {\n+\tuint32_t salt;\n+\tstruct inner {\n+\t\tuint8_t salt8[3];\n+\t\tuint8_t ccm_flags;\n+\t} inner;\n+} salt_union;\n+\n+\n+struct aead_ccm_iv {\n+\tuint8_t ccm_flags;\n+\tuint8_t salt[3];\n+\tuint64_t iv;\n+\tuint32_t cnt;\n+} __rte_packed;\n+\n+struct aead_ccm_aad {\n+\tuint8_t padding[18];\n+\tuint32_t spi;\n+\t/*\n+\t * RFC 4309, section 5:\n+\t * Two formats of the AAD are defined:\n+\t * one for 32-bit sequence numbers, and one for 64-bit ESN.\n+\t */\n+\tunion {\n+\t\tuint32_t u32[2];\n+\t\tuint64_t u64;\n+\t} sqn;\n+\tuint32_t align0; /* align to 16B boundary */\n+} __rte_packed;\n+\n+struct ccm_esph_iv {\n+\tstruct rte_esp_hdr esph;\n+\tuint64_t iv;\n+} __rte_packed;\n+\n+\n static inline void\n aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)\n {\n@@ -59,6 +131,16 @@ aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)\n \tctr->cnt = rte_cpu_to_be_32(1);\n }\n \n+static inline void\n+aead_chacha20_poly1305_iv_fill(struct aead_chacha20_poly1305_iv\n+\t\t\t       *chacha20_poly1305,\n+\t\t\t       uint64_t iv, uint32_t salt)\n+{\n+\tchacha20_poly1305->salt = salt;\n+\tchacha20_poly1305->iv = iv;\n+\tchacha20_poly1305->cnt = rte_cpu_to_be_32(1);\n+}\n+\n static inline void\n aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)\n {\n@@ -67,6 +149,21 @@ aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)\n \tgcm->cnt = rte_cpu_to_be_32(1);\n }\n \n+static inline void\n+aead_ccm_iv_fill(struct aead_ccm_iv *ccm, uint64_t iv, uint32_t salt)\n+{\n+\tunion aead_ccm_salt tsalt;\n+\n+\ttsalt.salt = salt;\n+\tccm->ccm_flags = tsalt.inner.ccm_flags;\n+\tccm->salt[0] = tsalt.inner.salt8[0];\n+\tccm->salt[1] = tsalt.inner.salt8[1];\n+\tccm->salt[2] = tsalt.inner.salt8[2];\n+\tccm->iv = iv;\n+\tccm->cnt = rte_cpu_to_be_32(1);\n+}\n+\n+\n /*\n  * RFC 4106, 5 AAD Construction\n  * spi and sqn should already be converted into network byte order.\n@@ -86,6 +183,25 @@ aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,\n \taad->align0 = 0;\n }\n \n+/*\n+ * RFC 4309, 5 AAD Construction\n+ * spi and sqn should already be converted into network byte order.\n+ * Make sure that not 
used bytes are zeroed.\n+ */\n+static inline void\n+aead_ccm_aad_fill(struct aead_ccm_aad *aad, rte_be32_t spi, rte_be64_t sqn,\n+\tint esn)\n+{\n+\taad->spi = spi;\n+\tif (esn)\n+\t\taad->sqn.u64 = sqn;\n+\telse {\n+\t\taad->sqn.u32[0] = sqn_low32(sqn);\n+\t\taad->sqn.u32[1] = 0;\n+\t}\n+\taad->align0 = 0;\n+}\n+\n static inline void\n gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)\n {\n@@ -93,6 +209,27 @@ gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)\n \tiv[1] = 0;\n }\n \n+\n+/*\n+ * RFC 4106, 5 AAD Construction\n+ * spi and sqn should already be converted into network byte order.\n+ * Make sure that not used bytes are zeroed.\n+ */\n+static inline void\n+aead_chacha20_poly1305_aad_fill(struct aead_chacha20_poly1305_aad *aad,\n+\t\t\t\t\trte_be32_t spi, rte_be64_t sqn,\n+\t\t\t\t\tint esn)\n+{\n+\taad->spi = spi;\n+\tif (esn)\n+\t\taad->sqn.u64 = sqn;\n+\telse {\n+\t\taad->sqn.u32[0] = sqn_low32(sqn);\n+\t\taad->sqn.u32[1] = 0;\n+\t}\n+\taad->align0 = 0;\n+}\n+\n /*\n  * Helper routine to copy IV\n  * Right now we support only algorithms with IV length equals 0/8/16 bytes.\ndiff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c\nindex 2b1df6a032..d66c88f05d 100644\n--- a/lib/ipsec/esp_inb.c\n+++ b/lib/ipsec/esp_inb.c\n@@ -63,6 +63,8 @@ inb_cop_prepare(struct rte_crypto_op *cop,\n {\n \tstruct rte_crypto_sym_op *sop;\n \tstruct aead_gcm_iv *gcm;\n+\tstruct aead_ccm_iv *ccm;\n+\tstruct aead_chacha20_poly1305_iv *chacha20_poly1305;\n \tstruct aesctr_cnt_blk *ctr;\n \tuint64_t *ivc, *ivp;\n \tuint32_t algo;\n@@ -83,6 +85,24 @@ inb_cop_prepare(struct rte_crypto_op *cop,\n \t\t\tsa->iv_ofs);\n \t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\t\tsop_aead_prepare(sop, sa, icv, pofs, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_ccm_iv_fill(ccm, ivp[0], sa->salt);\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\t\tsop_aead_prepare(sop, sa, icv, pofs, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tchacha20_poly1305 = rte_crypto_op_ctod_offset(cop,\n+\t\t\t\tstruct aead_chacha20_poly1305_iv *,\n+\t\t\t\tsa->iv_ofs);\n+\t\taead_chacha20_poly1305_iv_fill(chacha20_poly1305,\n+\t\t\t\t\t       ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_CBC:\n \tcase ALGO_TYPE_3DES_CBC:\n \t\tsop_ciph_auth_prepare(sop, sa, icv, pofs, plen);\n@@ -91,6 +111,14 @@ inb_cop_prepare(struct rte_crypto_op *cop,\n \t\tivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);\n \t\tcopy_iv(ivc, ivp, sa->iv_len);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_GMAC:\n+\t\tsop_ciph_auth_prepare(sop, sa, icv, pofs, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_CTR:\n \t\tsop_ciph_auth_prepare(sop, sa, icv, pofs, plen);\n \n@@ -110,6 +138,8 @@ inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n \tuint32_t *pofs, uint32_t plen, void *iv)\n {\n \tstruct aead_gcm_iv *gcm;\n+\tstruct aead_ccm_iv *ccm;\n+\tstruct aead_chacha20_poly1305_iv *chacha20_poly1305;\n \tstruct aesctr_cnt_blk *ctr;\n \tuint64_t *ivp;\n \tuint32_t clen;\n@@ -120,9 +150,19 @@ inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n \n \tswitch (sa->algo_type) {\n \tcase ALGO_TYPE_AES_GCM:\n+\tcase ALGO_TYPE_AES_GMAC:\n \t\tgcm = (struct 
aead_gcm_iv *)iv;\n \t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\t\tccm = (struct aead_ccm_iv *)iv;\n+\t\taead_ccm_iv_fill(ccm, ivp[0], sa->salt);\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\t\tchacha20_poly1305 = (struct aead_chacha20_poly1305_iv *)iv;\n+\t\taead_chacha20_poly1305_iv_fill(chacha20_poly1305,\n+\t\t\t\t\t       ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_CBC:\n \tcase ALGO_TYPE_3DES_CBC:\n \t\tcopy_iv(iv, ivp, sa->iv_len);\n@@ -175,6 +215,8 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \tconst union sym_op_data *icv)\n {\n \tstruct aead_gcm_aad *aad;\n+\tstruct aead_ccm_aad *caad;\n+\tstruct aead_chacha20_poly1305_aad *chacha_aad;\n \n \t/* insert SQN.hi between ESP trailer and ICV */\n \tif (sa->sqh_len != 0)\n@@ -184,9 +226,27 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \t * fill AAD fields, if any (aad fields are placed after icv),\n \t * right now we support only one AEAD algorithm: AES-GCM.\n \t */\n-\tif (sa->aad_len != 0) {\n-\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n-\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n+\tswitch (sa->algo_type) {\n+\tcase ALGO_TYPE_AES_GCM:\n+\t\tif (sa->aad_len != 0) {\n+\t\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n+\t\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n+\t\t}\n+\t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\t\tif (sa->aad_len != 0) {\n+\t\t\tcaad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);\n+\t\t\taead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));\n+\t\t}\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\t\tif (sa->aad_len != 0) {\n+\t\t\tchacha_aad = (struct aead_chacha20_poly1305_aad *)\n+\t\t\t    (icv->va + sa->icv_len);\n+\t\t\taead_chacha20_poly1305_aad_fill(chacha_aad,\n+\t\t\t\t\t\tsa->spi, sqc, IS_ESN(sa));\n+\t\t}\n+\t\tbreak;\n \t}\n }\n \ndiff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c\nindex 1e181cf2ce..a3f77469c3 100644\n--- a/lib/ipsec/esp_outb.c\n+++ b/lib/ipsec/esp_outb.c\n@@ -63,6 +63,8 @@ outb_cop_prepare(struct rte_crypto_op *cop,\n {\n \tstruct rte_crypto_sym_op *sop;\n \tstruct aead_gcm_iv *gcm;\n+\tstruct aead_ccm_iv *ccm;\n+\tstruct aead_chacha20_poly1305_iv *chacha20_poly1305;\n \tstruct aesctr_cnt_blk *ctr;\n \tuint32_t algo;\n \n@@ -80,6 +82,15 @@ outb_cop_prepare(struct rte_crypto_op *cop,\n \t\t/* NULL case */\n \t\tsop_ciph_auth_prepare(sop, sa, icv, hlen, plen);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_GMAC:\n+\t\t/* GMAC case */\n+\t\tsop_ciph_auth_prepare(sop, sa, icv, hlen, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_GCM:\n \t\t/* AEAD (AES_GCM) case */\n \t\tsop_aead_prepare(sop, sa, icv, hlen, plen);\n@@ -89,6 +100,26 @@ outb_cop_prepare(struct rte_crypto_op *cop,\n \t\t\tsa->iv_ofs);\n \t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\t\t/* AEAD (AES_CCM) case */\n+\t\tsop_aead_prepare(sop, sa, icv, hlen, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_ccm_iv_fill(ccm, ivp[0], sa->salt);\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\t\t/* AEAD (CHACHA20_POLY) case */\n+\t\tsop_aead_prepare(sop, sa, icv, hlen, plen);\n+\n+\t\t/* fill AAD IV (located inside crypto op) 
*/\n+\t\tchacha20_poly1305 = rte_crypto_op_ctod_offset(cop,\n+\t\t\tstruct aead_chacha20_poly1305_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_chacha20_poly1305_iv_fill(chacha20_poly1305,\n+\t\t\t\t\t       ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_CTR:\n \t\t/* Cipher-Auth (AES-CTR *) case */\n \t\tsop_ciph_auth_prepare(sop, sa, icv, hlen, plen);\n@@ -196,7 +227,9 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \tconst union sym_op_data *icv)\n {\n \tuint32_t *psqh;\n-\tstruct aead_gcm_aad *aad;\n+\tstruct aead_gcm_aad *gaad;\n+\tstruct aead_ccm_aad *caad;\n+\tstruct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;\n \n \t/* insert SQN.hi between ESP trailer and ICV */\n \tif (sa->sqh_len != 0) {\n@@ -208,9 +241,29 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \t * fill IV and AAD fields, if any (aad fields are placed after icv),\n \t * right now we support only one AEAD algorithm: AES-GCM .\n \t */\n+\tswitch (sa->algo_type) {\n+\tcase ALGO_TYPE_AES_GCM:\n \tif (sa->aad_len != 0) {\n-\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n-\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n+\t\tgaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n+\t\taead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));\n+\t}\n+\t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\tif (sa->aad_len != 0) {\n+\t\tcaad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);\n+\t\taead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));\n+\t}\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\tif (sa->aad_len != 0) {\n+\t\tchacha20_poly1305_aad =\t(struct aead_chacha20_poly1305_aad *)\n+\t\t\t(icv->va + sa->icv_len);\n+\t\taead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,\n+\t\t\tsa->spi, sqc, IS_ESN(sa));\n+\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n \t}\n }\n \n@@ -418,6 +471,8 @@ outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,\n {\n \tuint64_t *ivp = iv;\n \tstruct aead_gcm_iv *gcm;\n+\tstruct aead_ccm_iv *ccm;\n+\tstruct aead_chacha20_poly1305_iv *chacha20_poly1305;\n \tstruct aesctr_cnt_blk *ctr;\n \tuint32_t clen;\n \n@@ -426,6 +481,15 @@ outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,\n \t\tgcm = iv;\n \t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_CCM:\n+\t\tccm = iv;\n+\t\taead_ccm_iv_fill(ccm, ivp[0], sa->salt);\n+\t\tbreak;\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n+\t\tchacha20_poly1305 = iv;\n+\t\taead_chacha20_poly1305_iv_fill(chacha20_poly1305,\n+\t\t\t\t\t       ivp[0], sa->salt);\n+\t\tbreak;\n \tcase ALGO_TYPE_AES_CTR:\n \t\tctr = iv;\n \t\taes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);\ndiff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c\nindex e59189d215..720e0f365b 100644\n--- a/lib/ipsec/sa.c\n+++ b/lib/ipsec/sa.c\n@@ -47,6 +47,15 @@ fill_crypto_xform(struct crypto_xform *xform, uint64_t type,\n \t\tif (xfn != NULL)\n \t\t\treturn -EINVAL;\n \t\txform->aead = &xf->aead;\n+\n+\t/* GMAC has only auth */\n+\t} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&\n+\t\t\txf->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {\n+\t\tif (xfn != NULL)\n+\t\t\treturn -EINVAL;\n+\t\txform->auth = &xf->auth;\n+\t\txform->cipher = &xfn->cipher;\n+\n \t/*\n \t * CIPHER+AUTH xforms are expected in strict order,\n \t * depending on SA direction:\n@@ -247,12 +256,13 @@ esp_inb_init(struct rte_ipsec_sa *sa)\n \tsa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;\n \n \t/*\n-\t * for AEAD and NULL algorithms we can assume that\n+\t * for AEAD algorithms we can assume that\n \t * auth and cipher 
offsets would be equal.\n \t */\n \tswitch (sa->algo_type) {\n \tcase ALGO_TYPE_AES_GCM:\n-\tcase ALGO_TYPE_NULL:\n+\tcase ALGO_TYPE_AES_CCM:\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n \t\tsa->ctp.auth.raw = sa->ctp.cipher.raw;\n \t\tbreak;\n \tdefault:\n@@ -294,6 +304,8 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)\n \n \tswitch (algo_type) {\n \tcase ALGO_TYPE_AES_GCM:\n+\tcase ALGO_TYPE_AES_CCM:\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n \tcase ALGO_TYPE_AES_CTR:\n \tcase ALGO_TYPE_NULL:\n \t\tsa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +\n@@ -305,15 +317,20 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)\n \t\tsa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);\n \t\tsa->ctp.cipher.length = sa->iv_len;\n \t\tbreak;\n+\tcase ALGO_TYPE_AES_GMAC:\n+\t\tsa->ctp.cipher.offset = 0;\n+\t\tsa->ctp.cipher.length = 0;\n+\t\tbreak;\n \t}\n \n \t/*\n-\t * for AEAD and NULL algorithms we can assume that\n+\t * for AEAD algorithms we can assume that\n \t * auth and cipher offsets would be equal.\n \t */\n \tswitch (algo_type) {\n \tcase ALGO_TYPE_AES_GCM:\n-\tcase ALGO_TYPE_NULL:\n+\tcase ALGO_TYPE_AES_CCM:\n+\tcase ALGO_TYPE_CHACHA20_POLY1305:\n \t\tsa->ctp.auth.raw = sa->ctp.cipher.raw;\n \t\tbreak;\n \tdefault:\n@@ -374,13 +391,39 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,\n \t\t\tsa->pad_align = IPSEC_PAD_AES_GCM;\n \t\t\tsa->algo_type = ALGO_TYPE_AES_GCM;\n \t\t\tbreak;\n+\t\tcase RTE_CRYPTO_AEAD_AES_CCM:\n+\t\t\t/* RFC 4309 */\n+\t\t\tsa->aad_len = sizeof(struct aead_ccm_aad);\n+\t\t\tsa->icv_len = cxf->aead->digest_length;\n+\t\t\tsa->iv_ofs = cxf->aead->iv.offset;\n+\t\t\tsa->iv_len = sizeof(uint64_t);\n+\t\t\tsa->pad_align = IPSEC_PAD_AES_CCM;\n+\t\t\tsa->algo_type = ALGO_TYPE_AES_CCM;\n+\t\t\tbreak;\n+\t\tcase RTE_CRYPTO_AEAD_CHACHA20_POLY1305:\n+\t\t\t/* RFC 7634 & 8439*/\n+\t\t\tsa->aad_len = sizeof(struct aead_chacha20_poly1305_aad);\n+\t\t\tsa->icv_len = cxf->aead->digest_length;\n+\t\t\tsa->iv_ofs = cxf->aead->iv.offset;\n+\t\t\tsa->iv_len = sizeof(uint64_t);\n+\t\t\tsa->pad_align = IPSEC_PAD_CHACHA20_POLY1305;\n+\t\t\tsa->algo_type = ALGO_TYPE_CHACHA20_POLY1305;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\treturn -EINVAL;\n \t\t}\n+\t} else if (cxf->auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) {\n+\t\t/* RFC 4543 */\n+\t\t/* AES-GMAC is a special case of auth that needs IV */\n+\t\tsa->pad_align = IPSEC_PAD_AES_GMAC;\n+\t\tsa->iv_len = sizeof(uint64_t);\n+\t\tsa->icv_len = cxf->auth->digest_length;\n+\t\tsa->iv_ofs = cxf->auth->iv.offset;\n+\t\tsa->algo_type = ALGO_TYPE_AES_GMAC;\n+\n \t} else {\n \t\tsa->icv_len = cxf->auth->digest_length;\n \t\tsa->iv_ofs = cxf->cipher->iv.offset;\n-\t\tsa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;\n \n \t\tswitch (cxf->cipher->algo) {\n \t\tcase RTE_CRYPTO_CIPHER_NULL:\n@@ -414,6 +457,7 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,\n \t\t}\n \t}\n \n+\tsa->sqh_len = IS_ESN(sa) ? 
sizeof(uint32_t) : 0;\n \tsa->udata = prm->userdata;\n \tsa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);\n \tsa->salt = prm->ipsec_xform.salt;\ndiff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h\nindex 1bffe751f5..107ebd1519 100644\n--- a/lib/ipsec/sa.h\n+++ b/lib/ipsec/sa.h\n@@ -19,7 +19,10 @@ enum {\n \tIPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,\n \tIPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,\n \tIPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,\n+\tIPSEC_PAD_AES_CCM = IPSEC_PAD_DEFAULT,\n+\tIPSEC_PAD_CHACHA20_POLY1305 = IPSEC_PAD_DEFAULT,\n \tIPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,\n+\tIPSEC_PAD_AES_GMAC = IPSEC_PAD_DEFAULT,\n };\n \n /* iv sizes for different algorithms */\n@@ -67,6 +70,9 @@ enum sa_algo_type\t{\n \tALGO_TYPE_AES_CBC,\n \tALGO_TYPE_AES_CTR,\n \tALGO_TYPE_AES_GCM,\n+\tALGO_TYPE_AES_CCM,\n+\tALGO_TYPE_CHACHA20_POLY1305,\n+\tALGO_TYPE_AES_GMAC,\n \tALGO_TYPE_MAX\n };\n \n",
    "prefixes": [
        "v2",
        "05/10"
    ]
}
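
The PATCH (partial update) endpoint described above can be exercised the same way. A hedged sketch, assuming you hold a Patchwork API token with maintainer rights on the project ("XXXX" is a placeholder) and that the state value is one of the slugs the instance defines, such as the "superseded" seen in the response above:

    import requests

    # Partially update the patch: only the supplied field is changed.
    resp = requests.patch(
        "http://patchwork.dpdk.org/api/patches/96863/",
        headers={"Authorization": "Token XXXX"},
        json={"state": "superseded"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT request to the same URL would instead replace the patch, so every writable field must be included in the request body.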