@@ -56,7 +56,8 @@ The public API headers are grouped by topics:
[dpaa2_qdma] (@ref rte_pmd_dpaa2_qdma.h),
[crypto_scheduler] (@ref rte_cryptodev_scheduler.h),
[dlb2] (@ref rte_pmd_dlb2.h),
- [ifpga] (@ref rte_pmd_ifpga.h)
+ [ifpga] (@ref rte_pmd_ifpga.h),
+ [cnxk] (@ref rte_pmd_cnxk.h)
- **memory**:
[memseg] (@ref rte_memory.h),
@@ -12,6 +12,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/drivers/net/ark \
@TOPDIR@/drivers/net/bnxt \
@TOPDIR@/drivers/net/bonding \
+ @TOPDIR@/drivers/net/cnxk \
@TOPDIR@/drivers/net/dpaa \
@TOPDIR@/drivers/net/dpaa2 \
@TOPDIR@/drivers/net/i40e \
@@ -6,6 +6,7 @@
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>
+#include <rte_pmd_cnxk.h>
#include <cn10k_ethdev.h>
#include <cnxk_security.h>
@@ -502,7 +503,7 @@ cn10k_eth_sec_session_create(void *device,
ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
/* Alloc an sa index */
- rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
+ rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
if (rc)
goto mempool_put;
@@ -657,6 +658,109 @@ cn10k_eth_sec_capabilities_get(void *device __rte_unused)
return cn10k_eth_sec_capabilities;
}
+static int
+cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
+			     struct rte_security_session_conf *conf)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
+	struct rte_security_ipsec_xform *ipsec;
+	struct rte_crypto_sym_xform *crypto;
+	struct cnxk_eth_sec_sess *eth_sec;
+	bool inbound;
+	int rc;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+	    conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+		return -ENOENT;
+
+	ipsec = &conf->ipsec;
+	crypto = conf->crypto_xform;
+	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (!eth_sec)
+		return -ENOENT;
+
+	/* NOTE(review): direction is taken from conf; confirm callers cannot
+	 * flip inbound/outbound relative to session create (eth_sec->inb),
+	 * else the wrong SA type is written below.
+	 */
+	if (inbound) {
+		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
+		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
+
+		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
+					       true);
+		if (rc)
+			return -EINVAL;
+
+		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
+					   eth_sec->inb,
+					   sizeof(struct roc_ot_ipsec_inb_sa));
+		if (rc)
+			return -EINVAL;
+	} else {
+		struct roc_ot_ipsec_outb_sa *outb_sa_dptr;
+
+		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
+		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));
+
+		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
+		if (rc)
+			return -EINVAL;
+		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
+					   eth_sec->inb,
+					   sizeof(struct roc_ot_ipsec_outb_sa));
+		if (rc)
+			return -EINVAL;
+	}
+
+	/* Update tracked SPI only after the HW context update succeeded, so a
+	 * failed update does not leave the session with a stale SA but a new
+	 * SPI.
+	 */
+	eth_sec->spi = ipsec->spi;
+
+	return 0;
+}
+
+int
+rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
+			void *data, uint32_t len)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_sec_sess *eth_sec;
+	int rc;
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (eth_sec == NULL || data == NULL)
+		return -EINVAL;
+
+	/* Flush the SA context from HW to memory before reading it back */
+	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
+				 ROC_NIX_INL_SA_OP_FLUSH);
+	if (rc)
+		return -EINVAL;
+	/* NOTE(review): fixed 1 ms wait for the flush to land — confirm this
+	 * is guaranteed sufficient, or poll for completion instead.
+	 */
+	rte_delay_ms(1);
+	/* NOTE(review): len is not bounds-checked against the SA context
+	 * size; an oversized len reads past eth_sec->sa — add a bound check.
+	 */
+	memcpy(data, eth_sec->sa, len);
+
+	return 0;
+}
+
+int
+rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
+			 void *data, uint32_t len)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_eth_sec_sess *eth_sec;
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (eth_sec == NULL || data == NULL)
+		return -EINVAL;
+	/* NOTE(review): len is not validated against the HW SA context size —
+	 * an oversized len corrupts adjacent SA memory; add a bound check.
+	 */
+	return roc_nix_inl_ctx_write(&dev->nix, data, eth_sec->sa,
+				     eth_sec->inb, len);
+}
+
void
cn10k_eth_sec_ops_override(void)
{
@@ -670,4 +774,5 @@ cn10k_eth_sec_ops_override(void)
cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
+ cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;
}
@@ -739,6 +739,12 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Update HW erratas */
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
dev->cq_min_4k = 1;
+
+ if (dev->nix.custom_sa_action) {
+ dev->nix.custom_sa_action = 0;
+ plt_info("WARNING: Custom SA action is enabled. It's not supported"
+ " on cn9k device. Disabling it");
+ }
return 0;
}
@@ -262,7 +262,7 @@ cn9k_eth_sec_session_create(void *device,
ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
/* Alloc an sa index */
- rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
+ rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
if (rc)
goto mempool_put;
@@ -628,7 +628,8 @@ int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
struct rte_dev_reg_info *regs);
/* Security */
-int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p);
+int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
+ uint32_t spi);
int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
@@ -29,7 +29,8 @@ bitmap_ctzll(uint64_t slab)
}
int
-cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
+cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
+ uint32_t spi)
{
uint32_t pos, idx;
uint64_t slab;
@@ -42,17 +43,24 @@ cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
slab = 0;
/* Scan from the beginning */
plt_bitmap_scan_init(dev->outb.sa_bmap);
- /* Scan bitmap to get the free sa index */
- rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
- /* Empty bitmap */
- if (rc == 0) {
- plt_err("Outbound SA' exhausted, use 'ipsec_out_max_sa' "
- "devargs to increase");
- return -ERANGE;
- }
- /* Get free SA index */
- idx = pos + bitmap_ctzll(slab);
+	if (dev->nix.custom_sa_action) {
+		/* With custom SA action the SPI is used directly as the SA
+		 * index; valid indices are [0, max_sa) since the bitmap holds
+		 * max_sa entries, so spi == max_sa must also be rejected.
+		 */
+		if (spi >= dev->outb.max_sa)
+			return -ENOTSUP;
+		idx = spi;
+	} else {
+		/* Scan bitmap to get the free sa index */
+		rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
+		/* Empty bitmap */
+		if (rc == 0) {
+			plt_err("Outbound SAs exhausted, use 'ipsec_out_max_sa' "
+				"devargs to increase");
+			return -ERANGE;
+		}
+
+		/* Get free SA index */
+		idx = pos + bitmap_ctzll(slab);
+	}
plt_bitmap_clear(dev->outb.sa_bmap, idx);
*idx_p = idx;
return 0;
@@ -205,6 +205,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
case RTE_FLOW_ACTION_TYPE_SECURITY:
in_actions[i].type = ROC_NPC_ACTION_TYPE_SEC;
+ in_actions[i].conf = actions->conf;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP;
@@ -192,3 +192,5 @@ foreach flag: extra_flags
cflags += flag
endif
endforeach
+
+headers = files('rte_pmd_cnxk.h')
new file mode 100644
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+/**
+ * @file rte_pmd_cnxk.h
+ * CNXK PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_CNXK_H_
+#define _PMD_CNXK_H_
+
+#include <rte_compat.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_security.h>
+
+/** Algorithm type to be used with security action to
+ * calculate SA_index
+ */
+enum rte_pmd_cnxk_sec_action_alg {
+ /** No swizzling of SPI bits into SA index.
+ * SA_index is from SA_XOR if enabled.
+ */
+ RTE_PMD_CNXK_SEC_ACTION_ALG0,
+ /** SPI<31:28> has 4 upper bits which segment the sequence number space.
+ * Initial SA_index is from SA_XOR if enabled.
+ * SA_alg = { 4'b0, SA_mcam[27:0] + SPI[31:28]}
+ */
+ RTE_PMD_CNXK_SEC_ACTION_ALG1,
+ /** SPI<27:25> segment the sequence number space.
+ * Initial SA_index is from SA_XOR if enabled.
+ * SA_alg = { 7'b0, SA_mcam[24:0] + SPI[27:25]}
+ */
+ RTE_PMD_CNXK_SEC_ACTION_ALG2,
+ /** The inbound SPI may be "random", therefore we want the MCAM to be
+ * capable of remapping the SPI to an arbitrary SA_index.
+ * SPI to SA is done using a lookup in NIX/NPC cam entry with key as
+ * SPI, MATCH_ID, LFID.
+ */
+ RTE_PMD_CNXK_SEC_ACTION_ALG3,
+};
+
+struct rte_pmd_cnxk_sec_action {
+ /** Used as lookup result for ALG3 */
+ uint32_t sa_index;
+ /** When true XOR initial SA_INDEX with SA_HI/SA_LO to get SA_MCAM */
+ bool sa_xor;
+ /** SA_hi and SA_lo values for xor */
+ uint16_t sa_hi, sa_lo;
+ /** Determines alg to be applied post SA_MCAM computation with/without
+ * XOR.
+ */
+ enum rte_pmd_cnxk_sec_action_alg alg;
+};
+
+/**
+ * Read HW SA context from session.
+ *
+ * @param device
+ * Port identifier of Ethernet device.
+ * @param sess
+ * Handle of the security session.
+ * @param[out] data
+ * Destination pointer to copy SA context for application.
+ * @param len
+ * Length of SA context to copy into data parameter.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
+ void *data, uint32_t len);
+/**
+ * Write HW SA context to session.
+ *
+ * @param device
+ * Port identifier of Ethernet device.
+ * @param sess
+ * Handle of the security session.
+ * @param[in] data
+ * Source data pointer from application to copy SA context into session.
+ * @param len
+ * Length of SA context to copy from data parameter.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
+ void *data, uint32_t len);
+#endif /* _PMD_CNXK_H_ */
@@ -6,3 +6,10 @@ INTERNAL {
global:
cnxk_nix_inb_mode_set;
};
+
+EXPERIMENTAL {
+ # added in 22.07
+ global:
+ rte_pmd_cnxk_hw_sa_read;
+ rte_pmd_cnxk_hw_sa_write;
+};