new file mode 100644
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#include "cn20k_ethdev.h"
+#include "cn20k_rx.h"
+#include "cn20k_tx.h"
+
+/* Toggle packet type parsing on the Rx fast path.
+ *
+ * A non-zero @ptype_mask enables ptype extraction in the Rx burst
+ * routine; zero disables it to shave cycles off packet reception.
+ * Always succeeds.
+ */
+static int
+cn20k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	bool enable = (ptype_mask != 0);
+
+	if (enable)
+		dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
+	else
+		dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
+	dev->ptype_disable = enable ? 0 : 1;
+
+	return 0;
+}
+
+/* Pre-compute the NIX send header word0 for the default single-segment
+ * packet layout and cache it in the Tx queue.  SIZEM1 counts 16B units
+ * of descriptor minus one; it grows when an extension header and/or
+ * timestamp SEND_MEM words are required by the enabled Tx offloads.
+ */
+static void
+nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn20k_eth_txq *txq, uint16_t qid)
+{
+	union nix_send_hdr_w0_u w0;
+	const uint16_t flags = dev->tx_offload_flags;
+
+	w0.u = 0;
+	if (!(flags & NIX_TX_NEED_EXT_HDR)) {
+		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+		w0.sizem1 = 1;
+	} else if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+		/* Default: one seg packet would have:
+		 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+		 * => 8/2 - 1 = 3
+		 */
+		w0.sizem1 = 3;
+		/* Timestamp landing IOVA for the SEND_MEM word; offset is
+		 * derived from send_hdr->w0.sizem1 * 2 at transmit time.
+		 */
+		txq->ts_mem = dev->tstamp.tx_tstamp_iova;
+	} else {
+		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+		w0.sizem1 = 2;
+	}
+
+	w0.sq = qid;
+	txq->send_hdr_w0 = w0.u;
+	rte_wmb();
+}
+
+/* Set up Tx completion tracking for @txq from the completion CQ tied
+ * to @sq.  Allocates the mbuf pointer ring used to hold buffers until
+ * their completion events arrive.
+ *
+ * Returns 0 on success, -1 if the ring allocation fails.
+ */
+static int
+cn20k_nix_tx_compl_setup(struct cnxk_eth_dev *dev, struct cn20k_eth_txq *txq, struct roc_nix_sq *sq,
+			 uint16_t nb_desc)
+{
+	struct roc_nix_cq *cq;
+
+	cq = &dev->cqs[sq->cqid];
+	txq->tx_compl.desc_base = (uintptr_t)cq->desc_base;
+	txq->tx_compl.cq_door = cq->door;
+	txq->tx_compl.cq_status = cq->status;
+	txq->tx_compl.wdata = cq->wdata;
+	txq->tx_compl.head = cq->head;
+	txq->tx_compl.qmask = cq->qmask;
+	/* Total array size holding buffers is equal to
+	 * number of entries in cq and sq
+	 * max buffer in array = desc in cq + desc in sq
+	 */
+	txq->tx_compl.nb_desc_mask = (2 * rte_align32pow2(nb_desc)) - 1;
+	txq->tx_compl.ena = true;
+
+	/* Indices run 0..nb_desc_mask inclusive, so the ring needs
+	 * nb_desc_mask + 1 slots; allocating only nb_desc_mask entries
+	 * would under-size it by one.
+	 */
+	txq->tx_compl.ptr = (struct rte_mbuf **)plt_zmalloc(
+		(txq->tx_compl.nb_desc_mask + 1) * sizeof(struct rte_mbuf *), 0);
+	if (!txq->tx_compl.ptr)
+		return -1;
+
+	return 0;
+}
+
+/* Release a Tx queue: free the CN20K completion-tracking ring and run
+ * the common cnxk teardown.
+ *
+ * The completion pointer is captured BEFORE the common release path,
+ * which tears down (and may free) the queue structure itself — reading
+ * txq->tx_compl.ptr afterwards would be a use-after-free.
+ */
+static void
+cn20k_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+	struct cn20k_eth_txq *txq = eth_dev->data->tx_queues[qid];
+	struct rte_mbuf **compl_ptr = NULL;
+
+	if (txq && nix->tx_compl_ena)
+		compl_ptr = txq->tx_compl.ptr;
+
+	cnxk_nix_tx_queue_release(eth_dev, qid);
+
+	/* plt_free() is NULL-safe */
+	plt_free(compl_ptr);
+}
+
+/* Tx queue setup for CN20K.
+ *
+ * Performs the common cnxk Tx queue setup and then fills in the CN20K
+ * fast-path queue state: flow-control memory, optional Tx completion
+ * tracking, LMT/doorbell addresses, outbound inline-IPsec (CPT)
+ * context, packet-marking format and the pre-built send header.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int
+cn20k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t nb_desc,
+			 unsigned int socket, const struct rte_eth_txconf *tx_conf)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	struct roc_cpt_lf *inl_lf;
+	struct cn20k_eth_txq *txq;
+	struct roc_nix_sq *sq;
+	uint16_t crypto_qid;
+	int rc;
+
+	RTE_SET_USED(socket);
+
+	/* Common Tx queue setup */
+	rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc, sizeof(struct cn20k_eth_txq), tx_conf);
+	if (rc)
+		return rc;
+
+	sq = &dev->sqs[qid];
+	/* Update fast path queue */
+	txq = eth_dev->data->tx_queues[qid];
+	txq->fc_mem = sq->fc;
+	if (nix->tx_compl_ena) {
+		rc = cn20k_nix_tx_compl_setup(dev, txq, sq, nb_desc);
+		if (rc)
+			return rc;
+	}
+
+	/* Set Txq flag for MT_LOCKFREE */
+	txq->flag = !!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE);
+
+	/* Store lmt base in tx queue for easy access */
+	txq->lmt_base = nix->lmt_base;
+	txq->io_addr = sq->io_addr;
+	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
+
+	/* Fetch CPT LF info for outbound if present */
+	if (dev->outb.lf_base) {
+		/* Crypto queues are shared round-robin across Tx queues */
+		crypto_qid = qid % dev->outb.nb_crypto_qs;
+		inl_lf = dev->outb.lf_base + crypto_qid;
+
+		txq->cpt_io_addr = inl_lf->io_addr;
+		txq->cpt_fc = inl_lf->fc_addr;
+		txq->cpt_fc_sw = (int32_t *)((uintptr_t)dev->outb.fc_sw_mem +
+					     crypto_qid * RTE_CACHE_LINE_SIZE);
+
+		/* Use ~70% of CPT descriptors to keep headroom on the LF */
+		txq->cpt_desc = inl_lf->nb_desc * 0.7;
+		/* SA base is 2^16 aligned (asserted below), so the port id
+		 * can be stashed in the low bits of the combined value.
+		 */
+		txq->sa_base = (uint64_t)dev->outb.sa_base;
+		txq->sa_base |= (uint64_t)eth_dev->data->port_id;
+		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
+	}
+
+	/* Restore marking flag from roc */
+	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
+	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
+	/* Cache default send header and LSO tunnel format for fast path */
+	nix_form_default_desc(dev, txq, qid);
+	txq->lso_tun_fmt = dev->lso_tun_fmt;
+	return 0;
+}
+
+/* Rx queue setup for CN20K.
+ *
+ * Performs the common cnxk Rx queue setup (bumping the ring to 4K
+ * entries when the CQ errata requires it), then fills the CN20K
+ * fast-path receive queue state from the ROC RQ/CQ handles.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int
+cn20k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t nb_desc,
+			 unsigned int socket, const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn20k_eth_rxq *rxq;
+	struct roc_nix_rq *rq;
+	struct roc_nix_cq *cq;
+	int rc;
+
+	RTE_SET_USED(socket);
+
+	/* CQ Errata needs min 4K ring */
+	if (dev->cq_min_4k && nb_desc < 4096)
+		nb_desc = 4096;
+
+	/* Common Rx queue setup */
+	rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc, sizeof(struct cn20k_eth_rxq), rx_conf,
+				     mp);
+	if (rc)
+		return rc;
+
+	/* Do initial mtu setup for RQ0 before device start */
+	if (!qid) {
+		rc = nix_recalc_mtu(eth_dev);
+		if (rc)
+			return rc;
+	}
+
+	rq = &dev->rqs[qid];
+	cq = &dev->cqs[qid];
+
+	/* Update fast path queue */
+	rxq = eth_dev->data->rx_queues[qid];
+	rxq->rq = qid;
+	rxq->desc = (uintptr_t)cq->desc_base;
+	rxq->cq_door = cq->door;
+	rxq->cq_status = cq->status;
+	rxq->wdata = cq->wdata;
+	rxq->head = cq->head;
+	rxq->qmask = cq->qmask;
+	rxq->tstamp = &dev->tstamp;
+
+	/* Data offset from data to start of mbuf is first_skip */
+	rxq->data_off = rq->first_skip;
+	rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+
+	/* Setup security related info: inbound SA base + LMT region used
+	 * by the security fast path.
+	 */
+	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
+		rxq->lmt_base = dev->nix.lmt_base;
+		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
+	}
+
+	/* Lookup mem */
+	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+	return 0;
+}
+
+/* Stop a Tx queue via the common path; on success also zero the cached
+ * flow-control credits so any worker polling the queue stops promptly.
+ */
+static int
+cn20k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+	struct cn20k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+	int rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
+
+	if (rc == 0) {
+		/* Clear fc cache pkts to trigger worker stop */
+		txq->fc_cache_pkts = 0;
+	}
+
+	return rc;
+}
+
+/* Device configure: run the common cnxk configure, then reset the
+ * per-port reassembly dynamic field/flag bookkeeping.
+ */
+static int
+cn20k_nix_configure(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int ret;
+
+	/* Common nix configure */
+	ret = cnxk_nix_configure(eth_dev);
+	if (ret != 0)
+		return ret;
+
+	/* reset reassembly dynfield/flag offset */
+	dev->reass_dynfield_off = -1;
+	dev->reass_dynflag_bit = -1;
+
+	plt_nix_dbg("Configured port%d platform specific rx_offload_flags=%x"
+		    " tx_offload_flags=0x%x",
+		    eth_dev->data->port_id, dev->rx_offload_flags, dev->tx_offload_flags);
+	return 0;
+}
+
+/* Enable PTP timestamping: common enable, set fast-path flags, then
+ * refresh each queue's cached send header so it carries SEND_MEM words.
+ */
+static int
+cn20k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t idx;
+	int rc;
+
+	rc = cnxk_nix_timesync_enable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (idx = 0; idx < eth_dev->data->nb_tx_queues; idx++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[idx], idx);
+
+	return 0;
+}
+
+/* Disable PTP timestamping: common disable, clear fast-path flags, then
+ * refresh each queue's cached send header to drop the SEND_MEM words.
+ */
+static int
+cn20k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint16_t idx;
+	int rc;
+
+	rc = cnxk_nix_timesync_disable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (idx = 0; idx < eth_dev->data->nb_tx_queues; idx++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[idx], idx);
+
+	return 0;
+}
+
+/* Read and consume the latest Tx PTP timestamp.
+ *
+ * HW lands the raw timestamp at tstamp->tx_tstamp with seconds in the
+ * upper 32 bits and nanoseconds in the lower 32 bits; zero means no new
+ * timestamp has arrived since the last read.
+ *
+ * Returns 0 with @timestamp filled in, or -EINVAL if none is pending.
+ */
+static int
+cn20k_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev, struct timespec *timestamp)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cnxk_timesync_info *tstamp = &dev->tstamp;
+	uint64_t ns;
+
+	if (*tstamp->tx_tstamp == 0)
+		return -EINVAL;
+
+	/* Fold the {sec, ns} halves into a flat nanosecond count */
+	*tstamp->tx_tstamp =
+		((*tstamp->tx_tstamp >> 32) * NSEC_PER_SEC) + (*tstamp->tx_tstamp & 0xFFFFFFFFUL);
+	ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
+	*timestamp = rte_ns_to_timespec(ns);
+	/* Clear the slot so the next HW timestamp can be detected; the
+	 * barrier orders the clear before subsequent accesses.
+	 */
+	*tstamp->tx_tstamp = 0;
+	rte_wmb();
+
+	return 0;
+}
+
+/* Start the device via the common cnxk path, then apply CN20K-specific
+ * fast-path flags (Rx-inject based security reassembly).
+ */
+static int
+cn20k_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+	int ret;
+
+	/* Common eth dev start */
+	ret = cnxk_nix_dev_start(eth_dev);
+	if (ret != 0)
+		return ret;
+
+	/* Set flags for Rx Inject feature */
+	if (roc_idev_nix_rx_inject_get(nix->port_id))
+		dev->rx_offload_flags |= NIX_RX_SEC_REASSEMBLY_F;
+
+	return 0;
+}
+
+/* Report inline-IPsec IP reassembly capabilities.
+ *
+ * Reassembly is performed by the inline security path, so it is only
+ * offered when the Rx SECURITY offload is enabled and the SoC supports
+ * reassembly.  (The original RTE_SET_USED(eth_dev) was dead code:
+ * eth_dev is used via cnxk_eth_pmd_priv().)
+ *
+ * Returns 0 with @reassembly_capa filled in, or -ENOTSUP.
+ */
+static int
+cn20k_nix_reassembly_capability_get(struct rte_eth_dev *eth_dev,
+				    struct rte_eth_ip_reassembly_params *reassembly_capa)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	if (!roc_feature_nix_has_reass())
+		return -ENOTSUP;
+
+	if (!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
+		return -ENOTSUP;
+
+	reassembly_capa->timeout_ms = 60 * 1000;
+	reassembly_capa->max_frags = 4;
+	reassembly_capa->flags = RTE_ETH_DEV_REASSEMBLY_F_IPV4 | RTE_ETH_DEV_REASSEMBLY_F_IPV6;
+
+	return 0;
+}
+
+/* Reading back the IP reassembly runtime configuration is not
+ * supported on this platform.
+ */
+static int
+cn20k_nix_reassembly_conf_get(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_ip_reassembly_params *conf)
+{
+	RTE_SET_USED(conf);
+	RTE_SET_USED(eth_dev);
+
+	return -ENOTSUP;
+}
+
+/* Enable or disable inline-IPsec IP reassembly.
+ *
+ * conf->flags == 0 disables reassembly; otherwise the ROC layer is
+ * configured with the requested timeout and fragment limit and, when
+ * Rx security is active, the fast-path flag is set.
+ */
+static int
+cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
+			      const struct rte_eth_ip_reassembly_params *conf)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int rc;
+
+	if (!roc_feature_nix_has_reass())
+		return -ENOTSUP;
+
+	if (conf->flags == 0) {
+		/* Disable: drop the fast-path flag unless out-of-place
+		 * sessions still depend on it.
+		 */
+		if (dev->inb.nb_oop == 0)
+			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+		dev->inb.reass_en = false;
+		return 0;
+	}
+
+	rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags);
+	if (rc == 0 && (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) {
+		dev->rx_offload_flags |= NIX_RX_REAS_F;
+		dev->inb.reass_en = true;
+	}
+
+	return rc;
+}
+
+static int
+cn20k_nix_rx_avail_get(struct cn20k_eth_rxq *rxq)
+{
+ uint32_t qmask = rxq->qmask;
+ uint64_t reg, head, tail;
+ int available;
+
+ /* Use LDADDA version to avoid reorder */
+ reg = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+ /* CQ_OP_STATUS operation error */
+ if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) || reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+ return 0;
+ tail = reg & 0xFFFFF;
+ head = (reg >> 20) & 0xFFFFF;
+ if (tail < head)
+ available = tail - head + qmask + 1;
+ else
+ available = tail - head;
+
+ return available;
+}
+
+/* Dump @num Rx descriptors starting @offset entries past the current
+ * CQ head to @file.  Security (inline IPsec) completions dump the CPT
+ * parse header instead of the raw CQE.
+ *
+ * Fixes over the draft version: descriptors are addressed in CQE units
+ * via CQE_SZ() (not raw byte offsets) and the head index advances and
+ * wraps on every iteration instead of staying pinned.
+ *
+ * Returns 0 on success, -EINVAL if the requested range exceeds the
+ * number of pending descriptors.
+ */
+static int
+cn20k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+			 uint16_t num, FILE *file)
+{
+	struct cn20k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint64_t data_off = rxq->data_off;
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	struct cpt_parse_hdr_s *cpth;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+	uint64_t cq_w1;
+
+	available_pkts = cn20k_nix_rx_avail_get(rxq);
+
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u", num);
+		return -EINVAL;
+	}
+
+	/* Skip to the first requested descriptor, wrapping on ring size */
+	head = (head + offset) & qmask;
+	while (count < num) {
+		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));
+		cq_w1 = *((const uint64_t *)cq + 1);
+		if (cq_w1 & BIT(11)) {
+			/* Security completion: word 9 carries the mbuf IOVA;
+			 * the CPT parse header sits at data_off in the mbuf.
+			 */
+			rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+			struct rte_mbuf *mbuf = (struct rte_mbuf *)(buff - data_off);
+			cpth = (struct cpt_parse_hdr_s *)((uintptr_t)mbuf + (uint16_t)data_off);
+			roc_cpt_parse_hdr_dump(file, cpth);
+		} else {
+			roc_nix_cqe_dump(file, cq);
+		}
+
+		count++;
+		head = (head + 1) & qmask;
+	}
+	return 0;
+}
+
+/* Install the CN20K-specific eth dev ops over the common cnxk table.
+ * Runs once; subsequent calls are no-ops.
+ */
+static void
+nix_eth_dev_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Queue setup/teardown */
+	cnxk_eth_dev_ops.rx_queue_setup = cn20k_nix_rx_queue_setup;
+	cnxk_eth_dev_ops.tx_queue_setup = cn20k_nix_tx_queue_setup;
+	cnxk_eth_dev_ops.tx_queue_release = cn20k_nix_tx_queue_release;
+	cnxk_eth_dev_ops.tx_queue_stop = cn20k_nix_tx_queue_stop;
+
+	/* Device lifecycle */
+	cnxk_eth_dev_ops.dev_configure = cn20k_nix_configure;
+	cnxk_eth_dev_ops.dev_start = cn20k_nix_dev_start;
+	cnxk_eth_dev_ops.dev_ptypes_set = cn20k_nix_ptypes_set;
+
+	/* PTP timestamping */
+	cnxk_eth_dev_ops.timesync_enable = cn20k_nix_timesync_enable;
+	cnxk_eth_dev_ops.timesync_disable = cn20k_nix_timesync_disable;
+	cnxk_eth_dev_ops.timesync_read_tx_timestamp = cn20k_nix_timesync_read_tx_timestamp;
+
+	/* Inline IPsec IP reassembly */
+	cnxk_eth_dev_ops.ip_reassembly_capability_get = cn20k_nix_reassembly_capability_get;
+	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn20k_nix_reassembly_conf_get;
+	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn20k_nix_reassembly_conf_set;
+
+	/* Debug */
+	cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn20k_rx_descriptor_dump;
+}
+
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* No CN20K-specific TM ops yet; run-once guard kept for parity
+	 * with nix_eth_dev_ops_override() and future overrides.
+	 */
+}
+
+/* PCI remove callback: defer entirely to the common cnxk teardown. */
+static int
+cn20k_nix_remove(struct rte_pci_device *pci_dev)
+{
+	return cnxk_nix_remove(pci_dev);
+}
+
+/* PCI probe callback for CN20K NIX devices.
+ *
+ * Initializes the platform layer, installs the CN20K op overrides and
+ * runs the common cnxk probe.  The trailing process-type branch of the
+ * draft version was redundant (both paths returned 0) and is removed.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int
+cn20k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	struct rte_eth_dev *eth_dev;
+	int rc;
+
+	rc = roc_plt_init();
+	if (rc) {
+		plt_err("Failed to initialize platform model, rc=%d", rc);
+		return rc;
+	}
+
+	nix_eth_dev_ops_override();
+	nix_tm_ops_override();
+
+	/* Common probe */
+	rc = cnxk_nix_probe(pci_drv, pci_dev);
+	if (rc)
+		return rc;
+
+	/* Find eth dev allocated */
+	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (!eth_dev) {
+		/* Ignore if ethdev is in mid of detach state in secondary */
+		if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+			return 0;
+		return -ENOENT;
+	}
+
+	/* No further CN20K-specific setup yet for either process type */
+	return 0;
+}
+
+/* PCI device IDs matched by this driver: CN20KA PF, VF, AF_VF, SDP VF */
+static const struct rte_pci_id cn20k_pci_nix_map[] = {
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_AF_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN20KA, PCI_DEVID_CNXK_RVU_SDP_VF),
+	{
+		/* Sentinel entry terminating the table */
+		.vendor_id = 0,
+	},
+};
+
+/* PCI driver registration for the CN20K NIX ethdev PMD */
+static struct rte_pci_driver cn20k_pci_nix = {
+	.id_table = cn20k_pci_nix_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | RTE_PCI_DRV_INTR_LSC,
+	.probe = cn20k_nix_probe,
+	.remove = cn20k_nix_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cn20k, cn20k_pci_nix);
+RTE_PMD_REGISTER_PCI_TABLE(net_cn20k, cn20k_pci_nix_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_cn20k, "vfio-pci");
new file mode 100644
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#ifndef __CN20K_ETHDEV_H__
+#define __CN20K_ETHDEV_H__
+
+#include <cn20k_rxtx.h>
+#include <cnxk_ethdev.h>
+#include <cnxk_security.h>
+
+#endif /* __CN20K_ETHDEV_H__ */
new file mode 100644
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#ifndef __CN20K_RX_H__
+#define __CN20K_RX_H__
+
+#include "cn20k_rxtx.h"
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_vect.h>
+
+/* Nanoseconds per second, used to fold HW {sec, ns} timestamps */
+#define NSEC_PER_SEC 1000000000L
+
+/* Rx offload flag bits; each bit selects work performed by the Rx
+ * burst function for this queue.
+ */
+#define NIX_RX_OFFLOAD_NONE (0)
+#define NIX_RX_OFFLOAD_RSS_F BIT(0)
+#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
+#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
+#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
+#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
+#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(5)
+#define NIX_RX_OFFLOAD_SECURITY_F BIT(6)
+/* One past the highest offload bit; bounds offload-based dispatch */
+#define NIX_RX_OFFLOAD_MAX (NIX_RX_OFFLOAD_SECURITY_F << 1)
+
+/* Flags to control cqe_to_mbuf conversion function.
+ * Defining it from backwards to denote its been
+ * not used as offload flags to pick function
+ */
+#define NIX_RX_REAS_F BIT(12)
+#define NIX_RX_VWQE_F BIT(13)
+#define NIX_RX_MULTI_SEG_F BIT(14)
+
+/* Convenience combination: security offload with reassembly enabled */
+#define NIX_RX_SEC_REASSEMBLY_F (NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F)
+#endif /* __CN20K_RX_H__ */
new file mode 100644
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#ifndef __CN20K_RXTX_H__
+#define __CN20K_RXTX_H__
+
+#include <rte_security.h>
+
+/* ROC Constants */
+#include "roc_constants.h"
+
+/* Platform definition */
+#include "roc_platform.h"
+
+/* IO */
+#if defined(__aarch64__)
+#include "roc_io.h"
+#else
+#include "roc_io_generic.h"
+#endif
+
+/* HW structure definition */
+#include "hw/cpt.h"
+#include "hw/nix.h"
+#include "hw/npa.h"
+#include "hw/npc.h"
+#include "hw/ssow.h"
+
+#include "roc_ie_ot.h"
+
+/* NPA */
+#include "roc_npa_dp.h"
+
+/* SSO */
+#include "roc_sso_dp.h"
+
+/* CPT */
+#include "roc_cpt.h"
+
+/* NIX Inline dev */
+#include "roc_nix_inl_dp.h"
+
+#include "cnxk_ethdev_dp.h"
+
+/* CN20K Tx queue fast-path context (cache line aligned, one per SQ) */
+struct cn20k_eth_txq {
+	uint64_t send_hdr_w0;       /* Pre-built NIX send header word0 */
+	int64_t fc_cache_pkts;      /* Locally cached flow-control credits */
+	uint64_t *fc_mem;           /* Shared SQ flow-control memory */
+	uintptr_t lmt_base;         /* LMT line region base address */
+	rte_iova_t io_addr;         /* SQ doorbell IO address */
+	uint16_t sqes_per_sqb_log2; /* log2 of SQEs per SQB */
+	int16_t nb_sqb_bufs_adj;    /* Adjusted SQB buffer count */
+	uint8_t flag;               /* Non-zero when MT_LOCKFREE is enabled */
+	rte_iova_t cpt_io_addr;     /* CPT doorbell for outbound inline IPsec */
+	uint64_t sa_base;           /* Outbound SA base; low bits carry port id */
+	uint64_t *cpt_fc;           /* CPT flow-control address */
+	uint16_t cpt_desc;          /* CPT descriptors usable by this queue */
+	int32_t *cpt_fc_sw;         /* Software CPT flow-control counter */
+	uint64_t lso_tun_fmt;       /* LSO tunnel format word from dev */
+	uint64_t ts_mem;            /* Tx timestamp landing IOVA (SEND_MEM) */
+	uint64_t mark_flag : 8;     /* TM packet marking flags */
+	uint64_t mark_fmt : 48;     /* TM packet marking format */
+	struct cnxk_eth_txq_comp tx_compl; /* Tx completion tracking state */
+} __plt_cache_aligned;
+
+/* CN20K Rx queue fast-path context (cache line aligned, one per RQ/CQ) */
+struct cn20k_eth_rxq {
+	uint64_t mbuf_initializer; /* Template rearm data for received mbufs */
+	uintptr_t desc;            /* CQ descriptor ring base */
+	void *lookup_mem;          /* Fast-path lookup memory (ptype tables) */
+	uintptr_t cq_door;         /* CQ doorbell address */
+	uint64_t wdata;            /* CQ_OP_STATUS wdata for atomic polls */
+	int64_t *cq_status;        /* CQ status register address */
+	uint32_t head;             /* Current CQ head index */
+	uint32_t qmask;            /* CQ ring size mask (entries - 1) */
+	uint32_t available;        /* Cached count of pending CQEs */
+	uint16_t data_off;         /* mbuf data offset (RQ first_skip) */
+	uint64_t sa_base;          /* Inbound inline-IPsec SA base */
+	uint64_t lmt_base;         /* LMT line region base address */
+	uint64_t meta_aura;        /* Meta packet aura (security fast path) */
+	uintptr_t meta_pool;       /* Meta packet pool (security fast path) */
+	uint16_t rq;               /* RQ index backing this queue */
+	struct cnxk_timesync_info *tstamp; /* PTP timestamp state */
+} __plt_cache_aligned;
+
+/* Address of byte @offset within LMT line @lmt_num of region @lmt_addr */
+#define LMT_OFF(lmt_addr, lmt_num, offset)                                                         \
+	(void *)((uintptr_t)(lmt_addr) + ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
+
+#endif /* __CN20K_RXTX_H__ */
new file mode 100644
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#ifndef __CN20K_TX_H__
+#define __CN20K_TX_H__
+
+#include "cn20k_rxtx.h"
+#include <rte_eventdev.h>
+#include <rte_vect.h>
+
+/* Tx offload flag bits; each bit selects work performed by the Tx
+ * burst function for this queue.
+ */
+#define NIX_TX_OFFLOAD_NONE (0)
+#define NIX_TX_OFFLOAD_L3_L4_CSUM_F BIT(0)
+#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
+#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
+#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
+#define NIX_TX_OFFLOAD_TSO_F BIT(4)
+#define NIX_TX_OFFLOAD_TSTAMP_F BIT(5)
+#define NIX_TX_OFFLOAD_SECURITY_F BIT(6)
+/* One past the highest offload bit; bounds offload-based dispatch */
+#define NIX_TX_OFFLOAD_MAX (NIX_TX_OFFLOAD_SECURITY_F << 1)
+
+/* Flags to control xmit_prepare function.
+ * Defining it from backwards to denote its been
+ * not used as offload flags to pick function
+ */
+#define NIX_TX_VWQE_F BIT(14)
+#define NIX_TX_MULTI_SEG_F BIT(15)
+
+/* Offloads whose presence requires populating send header word1 */
+#define NIX_TX_NEED_SEND_HDR_W1                                                                    \
+	(NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |                             \
+	 NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+
+/* Offloads whose presence requires the send extension header */
+#define NIX_TX_NEED_EXT_HDR                                                                        \
+	(NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | NIX_TX_OFFLOAD_TSO_F)
+
+#endif /* __CN20K_TX_H__ */
@@ -59,6 +59,9 @@
#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x) ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
struct cnxk_eth_txq_comp {
uintptr_t desc_base;
uintptr_t cq_door;
@@ -14,7 +14,7 @@ else
soc_type = platform
endif
-if soc_type != 'cn9k' and soc_type != 'cn10k'
+if soc_type != 'cn9k' and soc_type != 'cn10k' and soc_type != 'cn20k'
soc_type = 'all'
endif
@@ -231,6 +231,15 @@ sources += files(
endif
endif
+
+if soc_type == 'cn20k' or soc_type == 'all'
+# CN20K
+sources += files(
+ 'cn20k_ethdev.c',
+)
+endif
+
+
deps += ['bus_pci', 'cryptodev', 'eventdev', 'security']
deps += ['common_cnxk', 'mempool_cnxk']