From: Sunil Kumar Kori <skori@marvell.com>
Once the PTP status is changed in hardware, i.e. PTP is enabled or
disabled, the change is propagated to the user via a registered
callback. Register the corresponding callback so the driver is
notified of PTP status changes.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev.c | 87 ++++++++++++++++++++++++++++++++++++--
drivers/net/cnxk/cn10k_rx.h | 1 +
drivers/net/cnxk/cn9k_ethdev.c | 73 ++++++++++++++++++++++++++++++++
drivers/net/cnxk/cn9k_rx.h | 1 +
drivers/net/cnxk/cnxk_ethdev.c | 2 +-
drivers/net/cnxk/cnxk_ethdev.h | 5 +++
drivers/net/cnxk/cnxk_ethdev_ops.c | 2 +
7 files changed, 166 insertions(+), 5 deletions(-)
@@ -268,6 +268,76 @@ cn10k_nix_configure(struct rte_eth_dev *eth_dev)
return 0;
}
+/* Function to enable ptp config for VFs */
+static void
+nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ if (nix_recalc_mtu(eth_dev))
+ plt_err("Failed to set MTU size for ptp");
+
+ dev->scalar_ena = true;
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ /* Setting up the function pointers as per new offload flags */
+ cn10k_eth_set_rx_function(eth_dev);
+ cn10k_eth_set_tx_function(eth_dev);
+}
+
+static uint16_t
+nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ struct cn10k_eth_rxq *rxq = queue;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_eth_dev *eth_dev;
+
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ eth_dev = rxq_sp->dev->eth_dev;
+ nix_ptp_enable_vf(eth_dev);
+
+ return 0;
+}
+
+static int
+cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
+{
+ struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
+ struct rte_eth_dev *eth_dev;
+ struct cn10k_eth_rxq *rxq;
+ int i;
+
+ if (!dev)
+ return -EINVAL;
+
+ eth_dev = dev->eth_dev;
+ if (!eth_dev)
+ return -EINVAL;
+
+ dev->ptp_en = ptp_en;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+ }
+
+ if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
+ !(roc_nix_is_lbk(nix))) {
+ /* In the case of a VF, the MTU cannot be set directly from this
+ * function because it runs as part of a mbox request (PF->VF), and
+ * setting the MTU itself requires another mbox message to be
+ * sent (VF->PF).
+ */
+ eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
+ rte_mb();
+ }
+
+ return 0;
+}
+
static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
@@ -333,6 +403,7 @@ static int
cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
int rc;
if (RTE_CACHE_LINE_SIZE != 64) {
@@ -354,15 +425,23 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (rc)
return rc;
+ /* Find eth dev allocated */
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!eth_dev)
+ return -ENOENT;
+
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
- if (!eth_dev)
- return -ENOENT;
-
/* Setup callbacks for secondary process */
cn10k_eth_set_tx_function(eth_dev);
cn10k_eth_set_rx_function(eth_dev);
+ return 0;
}
+
+ dev = cnxk_eth_pmd_priv(eth_dev);
+
+ /* Register up msg callbacks for PTP information */
+ roc_nix_ptp_info_cb_register(&dev->nix, cn10k_nix_ptp_info_update_cb);
+
return 0;
}
@@ -12,6 +12,7 @@
#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
+#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
/* Flags to control cqe_to_mbuf conversion function.
* Defining it from backwards to denote its been
@@ -277,6 +277,76 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
return 0;
}
+/* Function to enable ptp config for VFs */
+static void
+nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ if (nix_recalc_mtu(eth_dev))
+ plt_err("Failed to set MTU size for ptp");
+
+ dev->scalar_ena = true;
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ /* Setting up the function pointers as per new offload flags */
+ cn9k_eth_set_rx_function(eth_dev);
+ cn9k_eth_set_tx_function(eth_dev);
+}
+
+static uint16_t
+nix_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ struct cn9k_eth_rxq *rxq = queue;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_eth_dev *eth_dev;
+
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ eth_dev = rxq_sp->dev->eth_dev;
+ nix_ptp_enable_vf(eth_dev);
+
+ return 0;
+}
+
+static int
+cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
+{
+ struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix;
+ struct rte_eth_dev *eth_dev;
+ struct cn9k_eth_rxq *rxq;
+ int i;
+
+ if (!dev)
+ return -EINVAL;
+
+ eth_dev = dev->eth_dev;
+ if (!eth_dev)
+ return -EINVAL;
+
+ dev->ptp_en = ptp_en;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+ }
+
+ if (roc_nix_is_vf_or_sdp(nix) && !(roc_nix_is_sdp(nix)) &&
+ !(roc_nix_is_lbk(nix))) {
+ /* In the case of a VF, the MTU cannot be set directly from this
+ * function because it runs as part of a mbox request (PF->VF), and
+ * setting the MTU itself requires another mbox message to be
+ * sent (VF->PF).
+ */
+ eth_dev->rx_pkt_burst = nix_ptp_vf_burst;
+ rte_mb();
+ }
+
+ return 0;
+}
+
static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
@@ -396,6 +466,9 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
dev->hwcap = 0;
+ /* Register up msg callbacks for PTP information */
+ roc_nix_ptp_info_cb_register(&dev->nix, cn9k_nix_ptp_info_update_cb);
+
/* Update HW erratas */
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
dev->cq_min_4k = 1;
@@ -13,6 +13,7 @@
#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
+#define NIX_RX_OFFLOAD_TSTAMP_F BIT(4)
/* Flags to control cqe_to_mbuf conversion function.
* Defining it from backwards to denote its been
@@ -59,7 +59,7 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
}
}
-static int
+int
nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
struct rte_eth_dev_data *data = eth_dev->data;
@@ -100,6 +100,7 @@
/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff
+#define CNXK_NIX_TIMESYNC_RX_OFFSET 8
#define PTYPE_NON_TUNNEL_WIDTH 16
#define PTYPE_TUNNEL_WIDTH 12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
@@ -153,6 +154,7 @@ struct cnxk_eth_dev {
uint16_t flags;
uint8_t ptype_disable;
bool scalar_ena;
+ bool ptp_en;
/* Pointer back to rte */
struct rte_eth_dev *eth_dev;
@@ -332,6 +334,9 @@ int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
struct rte_dev_reg_info *regs);
+/* Other private functions */
+int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
+
/* Inlines */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
@@ -385,6 +385,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
int rc = -EINVAL;
uint32_t buffsz;
+ frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
+
/* Check if MTU is within the allowed range */
if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
plt_err("MTU is lesser than minimum");