@@ -694,7 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -703,6 +703,8 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
struct cn10k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
ws->tstamp = dev->tstamp;
+ if (meta_aura)
+ ws->meta_aura = meta_aura;
}
}
@@ -713,6 +715,7 @@ cn10k_sso_rx_adapter_queue_add(
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct cn10k_eth_rxq *rxq;
+ uint64_t meta_aura;
void *lookup_mem;
int rc;
@@ -726,7 +729,8 @@ cn10k_sso_rx_adapter_queue_add(
return -EINVAL;
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+ meta_aura = rxq->meta_aura;
+ cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
@@ -127,12 +127,14 @@ cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
}
static __rte_always_inline void
-cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
- void *lookup_mem, void *tstamp, uintptr_t lbase)
+cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn10k_sso_hws *ws)
{
uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+ struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+ void *lookup_mem = ws->lookup_mem;
+ uintptr_t lbase = ws->lmt_base;
struct rte_event_vector *vec;
- uint64_t aura_handle, laddr;
+ uint64_t meta_aura, laddr;
uint16_t nb_mbufs, non_vec;
uint16_t lmt_id, d_off;
struct rte_mbuf **wqe;
@@ -153,25 +155,31 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
mbuf_init |= 8;
+ meta_aura = ws->meta_aura;
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
- flags | NIX_RX_VWQE_F, lookup_mem,
- tstamp, lbase);
+ flags | NIX_RX_VWQE_F,
+ lookup_mem, tstamp,
+ lbase, meta_aura);
wqe += nb_mbufs;
non_vec = vec->nb_elem - nb_mbufs;
if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+ uint64_t sg_w1;
+
mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
sizeof(struct rte_mbuf));
/* Pick first mbuf's aura handle assuming all
* mbufs are from a vec and are from same RQ.
*/
- aura_handle = mbuf->pool->pool_id;
+ meta_aura = ws->meta_aura;
+ if (!meta_aura)
+ meta_aura = mbuf->pool->pool_id;
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
laddr = lbase;
laddr += 8;
- d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
- d_off += (mbuf_init & 0xFFFF);
+ sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+ d_off = sg_w1 - (uintptr_t)mbuf;
sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
}
@@ -208,7 +216,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
/* Free remaining meta buffers if any */
if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
- nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+ nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
plt_io_wmb();
}
}
@@ -241,8 +249,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
uint64_t cq_w5;
m = (struct rte_mbuf *)mbuf;
- d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
- d_off += RTE_PKTMBUF_HEADROOM;
+ d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
cq_w1 = *(uint64_t *)(u64[1] + 8);
cq_w5 = *(uint64_t *)(u64[1] + 40);
@@ -273,8 +280,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
- cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
- ws->tstamp[port], ws->lmt_base);
+ cn10k_process_vwqe(u64[1], port, flags, ws);
/* Mark vector mempool object as get */
RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
(void **)&u64[1], 1, 1);
@@ -148,6 +148,7 @@ struct cn10k_sso_hws {
uint8_t hws_id;
/* PTP timestamp */
struct cnxk_timesync_info **tstamp;
+ uint64_t meta_aura;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
@@ -194,7 +194,7 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
rq->vwqe_ena = 1;
rq->vwqe_first_skip = 0;
- rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+ rq->vwqe_aura_handle = vmp->pool_id;
rq->vwqe_max_sz_exp = rte_log2_u32(sz);
rq->vwqe_wait_tmo =
tmo_ns /
@@ -282,9 +282,13 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq->lmt_base = dev->nix.lmt_base;
rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
dev->inb.inl_dev);
+ rxq->meta_aura = rq->meta_aura_handle;
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ /* Assume meta packets come from the normal aura if a meta aura
+ * is not set up. */
+ if (!rxq->meta_aura)
+ rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
}
- rxq_sp = cnxk_eth_rxq_to_sp(rxq);
- rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
/* Lookup mem */
rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
@@ -39,7 +39,7 @@ struct cn10k_eth_rxq {
uint16_t data_off;
uint64_t sa_base;
uint64_t lmt_base;
- uint64_t aura_handle;
+ uint64_t meta_aura;
uint16_t rq;
struct cnxk_timesync_info *tstamp;
} __plt_cache_aligned;
@@ -877,7 +877,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
- aura_handle = rxq->aura_handle;
+ aura_handle = rxq->meta_aura;
sa_base = rxq->sa_base;
sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -984,7 +984,7 @@ static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
const uint16_t flags, void *lookup_mem,
struct cnxk_timesync_info *tstamp,
- uintptr_t lmt_base)
+ uintptr_t lmt_base, uint64_t meta_aura)
{
struct cn10k_eth_rxq *rxq = args;
const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -1003,10 +1003,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
- uint64_t aura_handle, lbase, laddr;
uint8_t loff = 0, lnum = 0, shft = 0;
uint8x16_t f0, f1, f2, f3;
uint16_t lmt_id, d_off;
+ uint64_t lbase, laddr;
uint16_t packets = 0;
uint16_t pkts_left;
uintptr_t sa_base;
@@ -1035,6 +1035,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
if (flags & NIX_RX_VWQE_F) {
+ uint64_t sg_w1;
uint16_t port;
mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
@@ -1042,10 +1043,15 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
/* Pick first mbuf's aura handle assuming all
* mbufs are from a vec and are from same RQ.
*/
- aura_handle = mbuf0->pool->pool_id;
+ if (!meta_aura)
+ meta_aura = mbuf0->pool->pool_id;
/* Calculate offset from mbuf to actual data area */
- d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
- d_off += (mbuf_initializer & 0xFFFF);
+ /* Zero aura's first skip (i.e. mbuf setup) might not match the
+ * actual offset, as first skip is taken from the second pass RQ.
+ * So compute using the diff between first SG pointer and mbuf addr.
+ */
+ sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+ d_off = (sg_w1 - (uint64_t)mbuf0);
/* Get SA Base from lookup tbl using port_id */
port = mbuf_initializer >> 48;
@@ -1053,7 +1059,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
lbase = lmt_base;
} else {
- aura_handle = rxq->aura_handle;
+ meta_aura = rxq->meta_aura;
d_off = rxq->data_off;
sa_base = rxq->sa_base;
lbase = rxq->lmt_base;
@@ -1721,7 +1727,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
/* Update aura handle */
*(uint64_t *)(laddr - 8) =
(((uint64_t)(15 & 0x1) << 32) |
- roc_npa_aura_handle_to_aura(aura_handle));
+ roc_npa_aura_handle_to_aura(meta_aura));
loff = loff - 15;
shft += 3;
@@ -1744,14 +1750,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
/* Update aura handle */
*(uint64_t *)(laddr - 8) =
(((uint64_t)(loff & 0x1) << 32) |
- roc_npa_aura_handle_to_aura(aura_handle));
+ roc_npa_aura_handle_to_aura(meta_aura));
data = (data & ~(0x7UL << shft)) |
(((uint64_t)loff >> 1) << shft);
/* Send up to 16 lmt lines of pointers */
nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
- aura_handle);
+ meta_aura);
rte_io_wmb();
lnum = 0;
loff = 0;
@@ -1769,13 +1775,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
/* Update aura handle */
*(uint64_t *)(laddr - 8) =
(((uint64_t)(loff & 0x1) << 32) |
- roc_npa_aura_handle_to_aura(aura_handle));
+ roc_npa_aura_handle_to_aura(meta_aura));
data = (data & ~(0x7UL << shft)) |
(((uint64_t)loff >> 1) << shft);
/* Send up to 16 lmt lines of pointers */
- nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
+ nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
if (flags & NIX_RX_VWQE_F)
plt_io_wmb();
}
@@ -1803,7 +1809,7 @@ static inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
const uint16_t flags, void *lookup_mem,
struct cnxk_timesync_info *tstamp,
- uintptr_t lmt_base)
+ uintptr_t lmt_base, uint64_t meta_aura)
{
RTE_SET_USED(args);
RTE_SET_USED(mbufs);
@@ -1812,6 +1818,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
RTE_SET_USED(lookup_mem);
RTE_SET_USED(tstamp);
RTE_SET_USED(lmt_base);
+ RTE_SET_USED(meta_aura);
return 0;
}
@@ -2038,7 +2045,7 @@ NIX_RX_FASTPATH_MODES
void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts, \
- (flags), NULL, NULL, 0); \
+ (flags), NULL, NULL, 0, 0); \
}
#define NIX_RX_RECV_VEC_MSEG(fn, flags) \
@@ -1732,6 +1732,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
roc_nix_mac_link_info_get_cb_register(nix,
cnxk_eth_dev_link_status_get_cb);
+ /* Register callback for inline meta pool create */
+ roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
+
dev->eth_dev = eth_dev;
dev->configured = 0;
dev->ptype_disable = 0;
@@ -642,6 +642,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
struct rte_security_session *sess);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+ bool destroy);
/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
@@ -4,10 +4,14 @@
#include <cnxk_ethdev.h>
+#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+
#define CNXK_NIX_INL_SELFTEST "selftest"
#define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
#define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
#define CNXK_INL_CPT_CHANNEL "inl_cpt_channel"
+#define CNXK_NIX_INL_NB_META_BUFS "nb_meta_bufs"
+#define CNXK_NIX_INL_META_BUF_SZ "meta_buf_sz"
struct inl_cpt_channel {
bool is_multi_channel;
@@ -28,6 +32,85 @@ bitmap_ctzll(uint64_t slab)
return __builtin_ctzll(slab);
}
+int
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+{
+ const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+ struct rte_pktmbuf_pool_private mbp_priv;
+ struct npa_aura_s *aura;
+ struct rte_mempool *mp;
+ uint16_t first_skip;
+ int rc;
+
+ /* Destroy the mempool if requested */
+ if (destroy) {
+ mp = rte_mempool_lookup(mp_name);
+ if (!mp)
+ return -ENOENT;
+
+ if (mp->pool_id != *aura_handle) {
+ plt_err("Meta pool aura mismatch");
+ return -EINVAL;
+ }
+
+ plt_free(mp->pool_config);
+ rte_mempool_free(mp);
+
+ *aura_handle = 0;
+ return 0;
+ }
+
+ /* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+ * support.
+ */
+ mp = rte_mempool_create_empty(mp_name, nb_bufs, buf_sz, 0,
+ sizeof(struct rte_pktmbuf_pool_private),
+ SOCKET_ID_ANY, 0);
+ if (!mp) {
+ plt_err("Failed to create inline meta pool");
+ return -EIO;
+ }
+
+ /* Indicate that a zero aura is to be allocated */
+ aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
+ if (!aura) {
+ rc = -ENOMEM;
+ goto free_mp;
+ }
+ aura->ena = 1;
+ aura->pool_addr = 0x0;
+
+ rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+ aura);
+ if (rc) {
+ plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
+ goto free_aura;
+ }
+
+ /* Init mempool private area */
+ first_skip = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+ memset(&mbp_priv, 0, sizeof(mbp_priv));
+ mbp_priv.mbuf_data_room_size = (buf_sz - first_skip +
+ RTE_PKTMBUF_HEADROOM);
+ rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+ /* Populate buffer */
+ rc = rte_mempool_populate_default(mp);
+ if (rc < 0) {
+ plt_err("Failed to create inline meta pool, rc=%d", rc);
+ goto free_aura;
+ }
+
+ rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+ *aura_handle = mp->pool_id;
+ return 0;
+free_aura:
+ plt_free(aura);
+free_mp:
+ rte_mempool_free(mp);
+ return rc;
+}
+
int
cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
uint32_t spi)
@@ -128,7 +211,7 @@ struct rte_security_ops cnxk_eth_sec_ops = {
};
static int
-parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
+parse_val_u32(const char *key, const char *value, void *extra_args)
{
RTE_SET_USED(key);
uint32_t val;
@@ -184,6 +267,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
uint32_t ipsec_in_min_spi = 0;
struct inl_cpt_channel cpt_channel;
struct rte_kvargs *kvlist;
+ uint32_t nb_meta_bufs = 0;
+ uint32_t meta_buf_sz = 0;
uint8_t selftest = 0;
memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -198,11 +283,15 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
&selftest);
rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
- &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
+ &parse_val_u32, &ipsec_in_min_spi);
rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
- &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
+ &parse_val_u32, &ipsec_in_max_spi);
rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
&cpt_channel);
+ rte_kvargs_process(kvlist, CNXK_NIX_INL_NB_META_BUFS, &parse_val_u32,
+ &nb_meta_bufs);
+ rte_kvargs_process(kvlist, CNXK_NIX_INL_META_BUF_SZ, &parse_val_u32,
+ &meta_buf_sz);
rte_kvargs_free(kvlist);
null_devargs:
@@ -212,6 +301,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
inl_dev->channel = cpt_channel.channel;
inl_dev->chan_mask = cpt_channel.mask;
inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
+ inl_dev->nb_meta_bufs = nb_meta_bufs;
+ inl_dev->meta_buf_sz = meta_buf_sz;
return 0;
exit:
return -EINVAL;