@@ -457,6 +457,19 @@ Runtime Config Options
With the above configuration, the driver would disable custom meta aura feature
for the device ``0002:02:00.0``.
+- ``Enable custom SA for inbound inline IPsec`` (default ``0``)
+
+   Custom SA for inbound inline IPsec can be enabled by specifying the
+   ``custom_inb_sa`` ``devargs`` parameter. This option must be passed to both
+   the ethdev and the inline device.
+
+ For example::
+
+ -a 0002:02:00.0,custom_inb_sa=1
+
+   With the above configuration, the driver skips inline inbound IPsec
+   post-processing, which must instead be done by the application (see the
+   sketch below).
+
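+   As a minimal illustration, the application could pass the inbound processing
+   parameters via ``rte_pmd_cnxk_hw_inline_inb_cfg_set()`` and then inspect the
+   CPT result of each received packet with ``rte_pmd_cnxk_inl_ipsec_res()``.
+   This is a sketch, not a complete datapath: the ``param1``/``param2`` values
+   and ``app_handle_result()`` are application-specific placeholders.
+
+   .. code-block:: c
+
+      #include <rte_ethdev.h>
+      #include <rte_mbuf.h>
+      #include <rte_pmd_cnxk.h>
+
+      static void
+      app_handle_result(struct rte_mbuf *m, union rte_pmd_cnxk_cpt_res_s *res)
+      {
+          /* Application-specific post-processing: check the result, strip
+           * tunnel headers, update SA state, etc.
+           */
+          (void)m;
+          (void)res;
+      }
+
+      static void
+      app_inb_setup_and_poll(uint16_t portid, uint16_t queue)
+      {
+          struct rte_pmd_cnxk_ipsec_inb_cfg cfg = {
+              .param1 = 0, /* Placeholder; see the CPT microcode document */
+              .param2 = 0,
+          };
+          struct rte_mbuf *pkts[32];
+          uint16_t nb, i;
+
+          /* Must be called before rte_eth_dev_configure() */
+          rte_pmd_cnxk_hw_inline_inb_cfg_set(portid, &cfg);
+
+          /* ... rte_eth_dev_configure(), queue setup, rte_eth_dev_start() ... */
+
+          nb = rte_eth_rx_burst(portid, queue, pkts, 32);
+          for (i = 0; i < nb; i++) {
+              union rte_pmd_cnxk_cpt_res_s *res =
+                  rte_pmd_cnxk_inl_ipsec_res(pkts[i]);
+
+              app_handle_result(pkts[i], res);
+          }
+      }
+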
.. note::
Above devarg parameters are configurable per device, user needs to pass the
@@ -655,6 +668,18 @@ Runtime Config Options for inline device
With the above configuration, driver would enable packet inject from ARM cores
to crypto to process and send back in Rx path.
+- ``Enable custom SA for inbound inline IPsec`` (default ``0``)
+
+   Custom SA for inbound inline IPsec can be enabled by specifying the
+   ``custom_inb_sa`` ``devargs`` parameter on both the inline device and the
+   ethdev; a combined invocation is shown below.
+
+ For example::
+
+ -a 0002:1d:00.0,custom_inb_sa=1
+
+   With the above configuration, the driver skips inline inbound IPsec
+   post-processing, which must instead be done by the application.
+
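+   Since the parameter has to be supplied to both devices, a typical invocation
+   passes it to the inline device and to each ethdev using the feature (the
+   addresses below reuse the examples from this guide)::
+
+      -a 0002:1d:00.0,custom_inb_sa=1 -a 0002:02:00.0,custom_inb_sa=1
+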
Port Representors
-----------------
@@ -473,7 +473,10 @@ struct roc_nix {
bool force_rx_aura_bp;
bool custom_meta_aura_ena;
bool rx_inj_ena;
+ bool custom_inb_sa;
uint32_t root_sched_weight;
+	uint16_t inb_cfg_param1; /* Inbound IPsec processing param1 */
+	uint16_t inb_cfg_param2; /* Inbound IPsec processing param2 */
/* End of input parameters */
/* LMT line base for "Per Core Tx LMT line" mode*/
uintptr_t lmt_base;
@@ -406,6 +406,8 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
/* CN9K SA size is different */
if (roc_model_is_cn9k())
inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
+ else if (roc_nix->custom_inb_sa)
+ inb_sa_sz = ROC_NIX_INL_INB_CUSTOM_SA_SZ;
else
inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
@@ -910,6 +912,11 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
cfg.param1 = u.u16;
cfg.param2 = 0;
cfg.opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
+
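+	/* Override IPsec params with application-provided values */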
+ if (roc_nix->custom_inb_sa) {
+ cfg.param1 = roc_nix->inb_cfg_param1;
+ cfg.param2 = roc_nix->inb_cfg_param2;
+ }
rc = roc_nix_bpids_alloc(roc_nix, ROC_NIX_INTF_TYPE_CPT_NIX, 1, bpids);
if (rc > 0) {
nix->cpt_nixbpid = bpids[0];
@@ -1769,7 +1776,6 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
if (roc_model_is_cn9k()) {
return 0;
}
-
if (idev)
inl_dev = idev->nix_inl_dev;
@@ -1777,6 +1783,11 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
return -EINVAL;
if (roc_nix) {
+ if (inb && roc_nix->custom_inb_sa && sa_len > ROC_NIX_INL_INB_CUSTOM_SA_SZ) {
+			plt_nix_dbg("SA length %u exceeds allocated length %u", sa_len,
+				    ROC_NIX_INL_INB_CUSTOM_SA_SZ);
+ return -EINVAL;
+ }
nix = roc_nix_to_nix_priv(roc_nix);
outb_lf = nix->cpt_lf_base;
@@ -1891,6 +1902,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
uint16_t max_spi = 0;
uint32_t rq_refs = 0;
uint8_t pkind = 0;
+	size_t inb_sa_sz = 0;
int i;
if (roc_model_is_cn9k())
@@ -1908,6 +1920,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
if (!nix->inl_inb_ena)
return 0;
sa_base = nix->inb_sa_base;
+ inb_sa_sz = nix->inb_sa_sz;
max_spi = roc_nix->ipsec_in_max_spi;
}
@@ -1919,6 +1932,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
inl_dev->ts_ena = ts_ena;
max_spi = inl_dev->ipsec_in_max_spi;
sa_base = inl_dev->inb_sa_base;
+ inb_sa_sz = inl_dev->inb_sa_sz;
} else if (inl_dev->ts_ena != ts_ena) {
if (inl_dev->ts_ena)
plt_err("Inline device is already configured with TS enable");
@@ -1937,8 +1951,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
return 0;
for (i = 0; i < max_spi; i++) {
- sa = ((uint8_t *)sa_base) +
- (i * ROC_NIX_INL_OT_IPSEC_INB_SA_SZ);
+ sa = ((uint8_t *)sa_base) + (i * inb_sa_sz);
((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind = pkind;
}
return 0;
@@ -33,6 +33,7 @@
#define ROC_NIX_INL_MAX_SOFT_EXP_RNGS \
(PLT_MAX_ETHPORTS * ROC_NIX_SOFT_EXP_PER_PORT_MAX_RINGS)
+#define ROC_NIX_INL_INB_CUSTOM_SA_SZ 512
/* Reassembly configuration */
#define ROC_NIX_INL_REAS_ACTIVE_LIMIT 0xFFF
@@ -97,6 +98,7 @@ struct roc_nix_inl_dev {
uint32_t meta_buf_sz;
uint32_t max_ipsec_rules;
uint8_t rx_inj_ena; /* Rx Inject Enable */
+	uint8_t custom_inb_sa; /* Custom inbound SA */
/* End of input parameters */
#define ROC_NIX_INL_MEM_SZ (2048)
@@ -420,6 +420,8 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
/* CN9K SA is different */
if (roc_model_is_cn9k())
inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
+ else if (inl_dev->custom_inb_sa)
+ inb_sa_sz = ROC_NIX_INL_INB_CUSTOM_SA_SZ;
else
inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
@@ -942,6 +944,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;
+ inl_dev->custom_inb_sa = roc_inl_dev->custom_inb_sa;
if (roc_inl_dev->rx_inj_ena) {
inl_dev->rx_inj_ena = 1;
@@ -94,6 +94,7 @@ struct nix_inl_dev {
uint32_t nb_meta_bufs;
uint32_t meta_buf_sz;
uint8_t rx_inj_ena; /* Rx Inject Enable */
+	uint8_t custom_inb_sa; /* Custom inbound SA */
/* NPC */
int *ipsec_index;
@@ -36,7 +36,7 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
- if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) && !dev->nix.custom_inb_sa)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
@@ -754,6 +754,9 @@ cn10k_eth_sec_session_create(void *device,
else if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
return -ENOTSUP;
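+	/* SA management is owned by the application when custom inbound SA is enabled */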
+ if (nix->custom_inb_sa)
+ return -ENOTSUP;
+
if (rte_security_dynfield_register() < 0)
return -ENOTSUP;
@@ -1038,6 +1041,8 @@ cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
return cnxk_eth_macsec_session_destroy(dev, sess);
return -ENOENT;
}
+ if (dev->nix.custom_inb_sa)
+ return -ENOTSUP;
lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
rte_spinlock_lock(lock);
@@ -1269,6 +1269,9 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
dev->rx_offloads = rxmode->offloads;
dev->tx_offloads = txmode->offloads;
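+	/* Custom inbound SA implies inline inbound IPsec; advertise the Rx security offload */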
+ if (nix->custom_inb_sa)
+ dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
+
/* Prepare rx cfg */
rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
if (dev->rx_offloads &
@@ -281,6 +281,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
#define CNXK_FLOW_AGING_POLL_FREQ "aging_poll_freq"
#define CNXK_NIX_RX_INJ_ENABLE "rx_inj_ena"
#define CNXK_CUSTOM_META_AURA_DIS "custom_meta_aura_dis"
+#define CNXK_CUSTOM_INB_SA "custom_inb_sa"
int
cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -304,6 +305,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
uint16_t scalar_enable = 0;
uint16_t tx_compl_ena = 0;
uint16_t custom_sa_act = 0;
+	uint16_t custom_inb_sa = 0;
struct rte_kvargs *kvlist;
uint32_t meta_buf_sz = 0;
uint16_t no_inl_dev = 0;
@@ -362,6 +364,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
rte_kvargs_process(kvlist, CNXK_NIX_RX_INJ_ENABLE, &parse_flag, &rx_inj_ena);
rte_kvargs_process(kvlist, CNXK_CUSTOM_META_AURA_DIS, &parse_flag,
&custom_meta_aura_dis);
+ rte_kvargs_process(kvlist, CNXK_CUSTOM_INB_SA, &parse_flag, &custom_inb_sa);
rte_kvargs_free(kvlist);
null_devargs:
@@ -381,6 +384,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
dev->nix.lock_rx_ctx = lock_rx_ctx;
dev->nix.custom_sa_action = custom_sa_act;
dev->nix.sqb_slack = sqb_slack;
+ dev->nix.custom_inb_sa = custom_inb_sa;
if (roc_feature_nix_has_own_meta_aura())
dev->nix.meta_buf_sz = meta_buf_sz;
@@ -19,6 +19,7 @@
#define CNXK_NIX_SOFT_EXP_POLL_FREQ "soft_exp_poll_freq"
#define CNXK_MAX_IPSEC_RULES "max_ipsec_rules"
#define CNXK_NIX_INL_RX_INJ_ENABLE "rx_inj_ena"
+#define CNXK_NIX_CUSTOM_INB_SA "custom_inb_sa"
/* Default soft expiry poll freq in usec */
#define CNXK_NIX_SOFT_EXP_POLL_FREQ_DFLT 100
@@ -198,7 +199,7 @@ parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
}
static int
-parse_inl_rx_inj_ena(const char *key, const char *value, void *extra_args)
+parse_val_u8(const char *key, const char *value, void *extra_args)
{
RTE_SET_USED(key);
uint32_t val;
@@ -412,6 +413,16 @@ rte_pmd_cnxk_inl_ipsec_res(struct rte_mbuf *mbuf)
return (void *)(wqe + 64 + desc_size);
}
+void
+rte_pmd_cnxk_hw_inline_inb_cfg_set(uint16_t portid, struct rte_pmd_cnxk_ipsec_inb_cfg *cfg)
+{
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[portid];
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
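+	/* Stash the params; roc_nix_inl_inb_init() applies them during device configure */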
+ dev->nix.inb_cfg_param1 = cfg->param1;
+ dev->nix.inb_cfg_param2 = cfg->param2;
+}
+
static unsigned int
cnxk_eth_sec_session_get_size(void *device __rte_unused)
{
@@ -481,6 +492,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
struct inl_cpt_channel cpt_channel;
uint32_t max_ipsec_rules = 0;
struct rte_kvargs *kvlist;
+ uint8_t custom_inb_sa = 0;
uint32_t nb_meta_bufs = 0;
uint32_t meta_buf_sz = 0;
uint8_t rx_inj_ena = 0;
@@ -510,7 +522,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
rte_kvargs_process(kvlist, CNXK_NIX_SOFT_EXP_POLL_FREQ,
&parse_val_u32, &soft_exp_poll_freq);
rte_kvargs_process(kvlist, CNXK_MAX_IPSEC_RULES, &parse_max_ipsec_rules, &max_ipsec_rules);
- rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_inl_rx_inj_ena, &rx_inj_ena);
+ rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_val_u8, &rx_inj_ena);
+ rte_kvargs_process(kvlist, CNXK_NIX_CUSTOM_INB_SA, &parse_val_u8, &custom_inb_sa);
rte_kvargs_free(kvlist);
null_devargs:
@@ -526,6 +539,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
inl_dev->max_ipsec_rules = max_ipsec_rules;
if (roc_feature_nix_has_rx_inject())
inl_dev->rx_inj_ena = rx_inj_ena;
+ inl_dev->custom_inb_sa = custom_inb_sa;
return 0;
exit:
return -EINVAL;
@@ -654,4 +668,5 @@ RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
CNXK_NIX_INL_META_BUF_SZ "=<1-U32_MAX>"
CNXK_NIX_SOFT_EXP_POLL_FREQ "=<0-U32_MAX>"
CNXK_MAX_IPSEC_RULES "=<1-4095>"
- CNXK_NIX_INL_RX_INJ_ENABLE "=1");
+ CNXK_NIX_INL_RX_INJ_ENABLE "=1"
+ CNXK_NIX_CUSTOM_INB_SA "=1");
@@ -49,13 +49,13 @@ enum rte_pmd_cnxk_sec_action_alg {
/** CPT queue type for obtaining queue hardware statistics. */
enum rte_pmd_cnxk_cpt_q_stats_type {
- /** Type to get Inline Device LF(s) statistics */
+ /** Type to get Inline Device queue(s) statistics */
RTE_PMD_CNXK_CPT_Q_STATS_INL_DEV,
- /** Type to get Inline Inbound LF which is attached to kernel device
+ /** Type to get Inline Inbound queue which is attached to kernel device
* statistics.
*/
RTE_PMD_CNXK_CPT_Q_STATS_KERNEL,
- /** Type to get CPT LF which is attached to ethdev statistics */
+	/** Type to get statistics of the CPT queue attached to the ethdev */
RTE_PMD_CNXK_CPT_Q_STATS_ETHDEV = 2,
};
@@ -513,6 +513,18 @@ union rte_pmd_cnxk_cpt_res_s {
uint64_t u64[2];
};
+/** Inline IPsec inbound queue configuration */
+struct rte_pmd_cnxk_ipsec_inb_cfg {
+ /** Param1 of PROCESS_INBOUND_IPSEC_PACKET as mentioned in the CPT
+ * microcode document.
+ */
+ uint16_t param1;
+ /** Param2 of PROCESS_INBOUND_IPSEC_PACKET as mentioned in the CPT
+ * microcode document.
+ */
+ uint16_t param2;
+};
+
/** Forward structure declaration for inline device queue. Applications obtain a pointer
* to this structure using the ``rte_pmd_cnxk_inl_dev_qptr_get`` API and use it to submit
* CPT instructions (cpt_inst_s) to the inline device via the
@@ -656,4 +668,16 @@ uint16_t rte_pmd_cnxk_inl_dev_submit(struct rte_pmd_cnxk_inl_dev_q *qptr, void *
__rte_experimental
int rte_pmd_cnxk_cpt_q_stats_get(uint16_t portid, enum rte_pmd_cnxk_cpt_q_stats_type type,
struct rte_pmd_cnxk_cpt_q_stats *stats, uint16_t idx);
+
+/**
+ * Set the configuration for hardware inline inbound IPsec processing. This API
+ * must be called before ``rte_eth_dev_configure``.
+ *
+ * @param portid
+ * Port identifier of Ethernet device.
+ * @param cfg
+ * Pointer to the IPsec inbound configuration structure.
+ */
+__rte_experimental
+void rte_pmd_cnxk_hw_inline_inb_cfg_set(uint16_t portid, struct rte_pmd_cnxk_ipsec_inb_cfg *cfg);
#endif /* _PMD_CNXK_H_ */
@@ -11,6 +11,7 @@ EXPERIMENTAL {
# added in 23.11
rte_pmd_cnxk_cpt_q_stats_get;
+ rte_pmd_cnxk_hw_inline_inb_cfg_set;
rte_pmd_cnxk_hw_session_base_get;
rte_pmd_cnxk_inl_dev_qptr_get;
rte_pmd_cnxk_inl_dev_submit;