[v6,4/5] net/enetfec: add enqueue and dequeue support

Message ID: 20211021044700.12370-5-apeksha.gupta@nxp.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Ferruh Yigit
Series: drivers/net: add NXP ENETFEC driver

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Apeksha Gupta Oct. 21, 2021, 4:46 a.m. UTC
  This patch adds burst enqueue and dequeue operations to the enetfec
PMD. Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
is used to enable this feature. By default, loopback mode is disabled.
Basic features such as promiscuous mode and basic stats are also added.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 doc/guides/nics/enetfec.rst          |   2 +
 doc/guides/nics/features/enetfec.ini |   2 +
 drivers/net/enetfec/enet_ethdev.c    | 197 ++++++++++++
 drivers/net/enetfec/enet_rxtx.c      | 445 +++++++++++++++++++++++++++
 4 files changed, 646 insertions(+)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c
  

Comments

Ferruh Yigit Oct. 27, 2021, 2:25 p.m. UTC | #1
On 10/21/2021 5:46 AM, Apeksha Gupta wrote:
> This patch adds burst enqueue and dequeue operations to the enetfec
> PMD. Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
> is used to enable this feature. By default, loopback mode is disabled.
> Basic features such as promiscuous mode and basic stats are also added.
> 

For the patch title, prefer "Rx/Tx support" over
"enqueue and dequeue support", which is the more common usage.

> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>

<...>

> --- a/doc/guides/nics/features/enetfec.ini
> +++ b/doc/guides/nics/features/enetfec.ini
> @@ -4,6 +4,8 @@
>   ; Refer to default.ini for the full list of available PMD features.
>   ;
>   [Features]
> +Basic stats	     = Y
> +Promiscuous mode     = Y

Can you please keep the order the same as in the default.ini file?
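
For reference, a sketch of the [Features] block in default.ini order
(assuming the surrounding entries stay as they are); there "Promiscuous
mode" comes before "Basic stats":

[Features]
Promiscuous mode     = Y
Basic stats          = Y
Linux                = Y
ARMv8                = Y
Usage doc            = Y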

<...>

> @@ -226,6 +264,110 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
>   	return 0;
>   }
>   
> +static int
> +enetfec_eth_close(__rte_unused struct rte_eth_dev *dev)
> +{

'dev' is used, so the '__rte_unused' attribute should be dropped.

> +	enet_free_buffers(dev);
> +	return 0;
> +}
> +
> +static int
> +enetfec_eth_link_update(struct rte_eth_dev *dev,
> +			int wait_to_complete __rte_unused)
> +{
> +	struct rte_eth_link link;
> +	unsigned int lstatus = 1;
> +
> +	if (dev == NULL) {
> +		ENETFEC_PMD_ERR("Invalid device in link_update.\n");

Duplicated '\n'; the log macro already appends a newline.

> +		return 0;
> +	}
> +
> +	memset(&link, 0, sizeof(struct rte_eth_link));
> +
> +	link.link_status = lstatus;
> +	link.link_speed = ETH_SPEED_NUM_1G;
> +
> +	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
> +			 "Up");
> +
> +	return rte_eth_linkstatus_set(dev, &link);
> +}
> +
> +static int
> +enetfec_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)

'dev' is used, so the '__rte_unused' attribute should be dropped.

<...>

> +static int
> +enetfec_stats_get(struct rte_eth_dev *dev,
> +	      struct rte_eth_stats *stats)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	struct rte_eth_stats *eth_stats = &fep->stats;
> +
> +	if (stats == NULL)
> +		return -1;

No need to check this; the ethdev layer already does it.

> +
> +	memset(stats, 0, sizeof(struct rte_eth_stats));
> +

Same here, ethdev does this.
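
For reference, the ethdev wrapper already zeroes the caller's stats struct
before invoking the driver callback; roughly like this (a paraphrase of
rte_eth_stats_get(), not copied verbatim):

int
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);

	/* zeroed here, so the PMD callback does not need to do it again */
	memset(stats, 0, sizeof(*stats));

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}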

<...>

> +
> +	/*
> +	 * Set default mac address
> +	 */
> +	macaddr.addr_bytes[0] = 1;
> +	macaddr.addr_bytes[1] = 1;
> +	macaddr.addr_bytes[2] = 1;
> +	macaddr.addr_bytes[3] = 1;
> +	macaddr.addr_bytes[4] = 1;
> +	macaddr.addr_bytes[5] = 1;

If it is fixed, you can set the address while declaring the variable:
struct rte_ether_addr macaddr = {
     .addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
};


<...>

> index 0000000000..445fa97e77
> --- /dev/null
> +++ b/drivers/net/enetfec/enet_rxtx.c
> @@ -0,0 +1,445 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 NXP
> + */
> +
> +#include <signal.h>
> +#include <rte_mbuf.h>
> +#include <rte_io.h>
> +#include "enet_regs.h"
> +#include "enet_ethdev.h"
> +#include "enet_pmd_logs.h"
> +
> +#define ENETFEC_LOOPBACK	0
> +#define ENETFEC_DUMP		0
> +

Instead of compile-time flags, why not convert them to devargs so
they can be updated without recompiling?

This also makes sure all the code stays enabled and prevents it from
becoming dead code over time.
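
For illustration, a minimal sketch of a devargs-based switch using the
rte_kvargs API; the "loopback" key and the helper names here are
hypothetical, not part of the submitted patch:

#include <stdint.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>

#define ENETFEC_LOOPBACK_ARG	"loopback"	/* hypothetical devarg key */

static int
enetfec_parse_uint(const char *key __rte_unused, const char *value,
		void *opaque)
{
	/* accept decimal or hex values, e.g. loopback=1 */
	*(uint32_t *)opaque = (uint32_t)strtoul(value, NULL, 0);
	return 0;
}

static int
enetfec_parse_devargs(struct rte_vdev_device *vdev, uint32_t *loopback)
{
	static const char * const valid_args[] = {
		ENETFEC_LOOPBACK_ARG,
		NULL,
	};
	const char *params = rte_vdev_device_args(vdev);
	struct rte_kvargs *kvlist;

	if (params == NULL || params[0] == '\0')
		return 0;	/* no devargs given, keep defaults */
	kvlist = rte_kvargs_parse(params, valid_args);
	if (kvlist == NULL)
		return -EINVAL;
	/* overwrite *loopback only when the key is present */
	rte_kvargs_process(kvlist, ENETFEC_LOOPBACK_ARG,
			enetfec_parse_uint, loopback);
	rte_kvargs_free(kvlist);
	return 0;
}

The device could then be launched with something like
--vdev='net_enetfec,loopback=1' and the flag tested at runtime instead of
at compile time.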

<...>

> +
> +#if ENETFEC_LOOPBACK
> +static volatile bool lb_quit;
> +
> +static void fec_signal_handler(int signum)
> +{
> +	if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
> +		printf("\n\n %s: Signal %d received, preparing to exit...\n",
> +				__func__, signum);
> +		lb_quit = true;
> +	}
> +}

Not sure if handling signals in the driver is a good idea; this is more an
application-level decision. Please remember that DPDK is a library and this
PMD is one of many PMDs in that library.

Also, please don't use 'printf' directly.
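
For example, assuming the driver's existing log macros, the handler could go
through the PMD logger instead (a sketch, not the submitted code):

	ENETFEC_PMD_INFO("%s: Signal %d received, preparing to exit...",
			__func__, signum);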

> +
> +static void
> +enetfec_lb_rxtx(void *rxq1)
> +{
> +	struct rte_mempool *pool;
> +	struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
> +	struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
> +	unsigned short status;
> +	unsigned short pkt_len = 0;
> +	int index_r = 0, index_t = 0;
> +	u8 *data;
> +	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
> +	struct rte_eth_stats *stats = &rxq->fep->stats;
> +	unsigned int i;
> +	struct enetfec_private *fep;
> +	struct enetfec_priv_tx_q *txq;
> +	fep = rxq->fep->dev->data->dev_private;
> +	txq = fep->tx_queues[0];
> +
> +	pool = rxq->pool;
> +	rx_bdp = rxq->bd.cur;
> +	tx_bdp = txq->bd.cur;
> +
> +	signal(SIGTSTP, fec_signal_handler);
> +	while (!lb_quit) {
> +chk_again:
> +		status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
> +		if (status & RX_BD_EMPTY) {
> +			if (!lb_quit)
> +				goto chk_again;
> +			rxq->bd.cur = rx_bdp;
> +			txq->bd.cur = tx_bdp;
> +			return;
> +		}
> +
> +		/* Check for errors. */
> +		status ^= RX_BD_LAST;
> +		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
> +			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
> +			RX_BD_TR)) {
> +			stats->ierrors++;
> +			if (status & RX_BD_OV) {
> +				/* FIFO overrun */
> +				ENETFEC_PMD_ERR("rx_fifo_error\n");
> +				goto rx_processing_done;
> +			}
> +			if (status & (RX_BD_LG | RX_BD_SH
> +						| RX_BD_LAST)) {
> +				/* Frame too long or too short. */
> +				ENETFEC_PMD_ERR("rx_length_error\n");
> +				if (status & RX_BD_LAST)
> +					ENETFEC_PMD_ERR("rcv is not +last\n");

duplicated '\n', but more importantly this is the datapath; are you sure you
want debug logs in the datapath?

'ENETFEC_DP_LOG' should be the one to use in the datapath, since it is
optimized out based on its default log level, so it does not impact the
datapath.
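
For reference, a minimal sketch of how such a datapath log macro is commonly
defined in DPDK PMDs (the exact enetfec definition may differ):

#define ENETFEC_DP_LOG(level, fmt, args...) \
	RTE_LOG_DP(level, PMD, "enetfec: " fmt "\n", ## args)

With RTE_LOG_DP(), messages below the compile-time RTE_LOG_DP_LEVEL are
compiled out entirely, so a DEBUG-level call costs nothing in a default
build.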
  
Apeksha Gupta Nov. 8, 2021, 6:47 p.m. UTC | #2
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Wednesday, October 27, 2021 7:56 PM
> To: Apeksha Gupta <apeksha.gupta@nxp.com>; david.marchand@redhat.com;
> andrew.rybchenko@oktetlabs.ru; ferruh.yigit@intel.com
> Cc: dev@dpdk.org; Sachin Saxena <sachin.saxena@nxp.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>
> Subject: [EXT] Re: [dpdk-dev] [PATCH v6 4/5] net/enetfec: add enqueue and
> dequeue support
> 
> Caution: EXT Email
> 
> On 10/21/2021 5:46 AM, Apeksha Gupta wrote:
> > This patch adds burst enqueue and dequeue operations to the enetfec
> > PMD. Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
> > is used to enable this feature. By default, loopback mode is disabled.
> > Basic features such as promiscuous mode and basic stats are also added.
> >
> 
> For the patch title, prefer "Rx/Tx support" over
> "enqueue and dequeue support", which is the more common usage.
> 
> > Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> > Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> 
> <...>
> 
> > --- a/doc/guides/nics/features/enetfec.ini
> > +++ b/doc/guides/nics/features/enetfec.ini
> > @@ -4,6 +4,8 @@
> >   ; Refer to default.ini for the full list of available PMD features.
> >   ;
> >   [Features]
> > +Basic stats       = Y
> > +Promiscuous mode     = Y
> 
> Can you please keep the order the same as in the default.ini file?
[Apeksha] yes, updated in v7 patch series.

> 
> <...>
> 
> > @@ -226,6 +264,110 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
> >       return 0;
> >   }
> >
> > +static int
> > +enetfec_eth_close(__rte_unused struct rte_eth_dev *dev)
> > +{
> 
> 'dev' is used, so the '__rte_unused' attribute should be dropped.
> 
> > +     enet_free_buffers(dev);
> > +     return 0;
> > +}
> > +
> > +static int
> > +enetfec_eth_link_update(struct rte_eth_dev *dev,
> > +                     int wait_to_complete __rte_unused)
> > +{
> > +     struct rte_eth_link link;
> > +     unsigned int lstatus = 1;
> > +
> > +     if (dev == NULL) {
> > +             ENETFEC_PMD_ERR("Invalid device in link_update.\n");
> 
> Duplicated '\n'; the log macro already appends a newline.
> 
> > +             return 0;
> > +     }
> > +
> > +     memset(&link, 0, sizeof(struct rte_eth_link));
> > +
> > +     link.link_status = lstatus;
> > +     link.link_speed = ETH_SPEED_NUM_1G;
> > +
> > +     ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
> > +                      "Up");
> > +
> > +     return rte_eth_linkstatus_set(dev, &link);
> > +}
> > +
> > +static int
> > +enetfec_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
> 
> 'dev' is used, so the '__rte_unused' attribute should be dropped.
> 
> <...>
> 
> > +static int
> > +enetfec_stats_get(struct rte_eth_dev *dev,
> > +           struct rte_eth_stats *stats)
> > +{
> > +     struct enetfec_private *fep = dev->data->dev_private;
> > +     struct rte_eth_stats *eth_stats = &fep->stats;
> > +
> > +     if (stats == NULL)
> > +             return -1;
> 
> No need to check this; the ethdev layer already does it.
> 
> > +
> > +     memset(stats, 0, sizeof(struct rte_eth_stats));
> > +
> 
> Same here, ethdev does this.
> 
> <...>
> 
> > +
> > +     /*
> > +      * Set default mac address
> > +      */
> > +     macaddr.addr_bytes[0] = 1;
> > +     macaddr.addr_bytes[1] = 1;
> > +     macaddr.addr_bytes[2] = 1;
> > +     macaddr.addr_bytes[3] = 1;
> > +     macaddr.addr_bytes[4] = 1;
> > +     macaddr.addr_bytes[5] = 1;
> 
> If it is fixed, you can set the address while declaring the variable:
> struct rte_ether_addr macaddr = {
>      .addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
> };
[Apeksha] As suggested, all the above comments are handled in the v7 patch series.
> 
> 
> <...>
> 
> > index 0000000000..445fa97e77
> > --- /dev/null
> > +++ b/drivers/net/enetfec/enet_rxtx.c
> > @@ -0,0 +1,445 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright 2021 NXP
> > + */
> > +
> > +#include <signal.h>
> > +#include <rte_mbuf.h>
> > +#include <rte_io.h>
> > +#include "enet_regs.h"
> > +#include "enet_ethdev.h"
> > +#include "enet_pmd_logs.h"
> > +
> > +#define ENETFEC_LOOPBACK     0
> > +#define ENETFEC_DUMP         0
> > +
> 
> Instead of compile-time flags, why not convert them to devargs so
> they can be updated without recompiling?
>
> This also makes sure all the code stays enabled and prevents it from
> becoming dead code over time.
[Apeksha] These are added for debug purposes only; we didn't want a per-packet check, so we are removing this functionality.
	As suggested, we will implement and add it in a proper format later.
> 
> <...>
> 
> > +
> > +#if ENETFEC_LOOPBACK
> > +static volatile bool lb_quit;
> > +
> > +static void fec_signal_handler(int signum)
> > +{
> > +     if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
> > +             printf("\n\n %s: Signal %d received, preparing to exit...\n",
> > +                             __func__, signum);
> > +             lb_quit = true;
> > +     }
> > +}
> 
> Not sure if handling signals in the driver is a good idea; this is more an
> application-level decision. Please remember that DPDK is a library and this
> PMD is one of many PMDs in that library.
>
> Also, please don't use 'printf' directly.
> 
> > +
> > +static void
> > +enetfec_lb_rxtx(void *rxq1)
> > +{
> > +     struct rte_mempool *pool;
> > +     struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
> > +     struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
> > +     unsigned short status;
> > +     unsigned short pkt_len = 0;
> > +     int index_r = 0, index_t = 0;
> > +     u8 *data;
> > +     struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
> > +     struct rte_eth_stats *stats = &rxq->fep->stats;
> > +     unsigned int i;
> > +     struct enetfec_private *fep;
> > +     struct enetfec_priv_tx_q *txq;
> > +     fep = rxq->fep->dev->data->dev_private;
> > +     txq = fep->tx_queues[0];
> > +
> > +     pool = rxq->pool;
> > +     rx_bdp = rxq->bd.cur;
> > +     tx_bdp = txq->bd.cur;
> > +
> > +     signal(SIGTSTP, fec_signal_handler);
> > +     while (!lb_quit) {
> > +chk_again:
> > +             status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
> > +             if (status & RX_BD_EMPTY) {
> > +                     if (!lb_quit)
> > +                             goto chk_again;
> > +                     rxq->bd.cur = rx_bdp;
> > +                     txq->bd.cur = tx_bdp;
> > +                     return;
> > +             }
> > +
> > +             /* Check for errors. */
> > +             status ^= RX_BD_LAST;
> > +             if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
> > +                     RX_BD_CR | RX_BD_OV | RX_BD_LAST |
> > +                     RX_BD_TR)) {
> > +                     stats->ierrors++;
> > +                     if (status & RX_BD_OV) {
> > +                             /* FIFO overrun */
> > +                             ENETFEC_PMD_ERR("rx_fifo_error\n");
> > +                             goto rx_processing_done;
> > +                     }
> > +                     if (status & (RX_BD_LG | RX_BD_SH
> > +                                             | RX_BD_LAST)) {
> > +                             /* Frame too long or too short. */
> > +                             ENETFEC_PMD_ERR("rx_length_error\n");
> > +                             if (status & RX_BD_LAST)
> > +                                     ENETFEC_PMD_ERR("rcv is not +last\n");
> 
> duplicated '\n', but more importantly this is the datapath; are you sure you
> want debug logs in the datapath?
>
> 'ENETFEC_DP_LOG' should be the one to use in the datapath, since it is
> optimized out based on its default log level, so it does not impact the
> datapath.
  

Patch

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index dfcd032098..47836630b6 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -82,6 +82,8 @@  driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..7e0fb148ac 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@ 
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Basic stats	     = Y
+Promiscuous mode     = Y
 Linux		     = Y
 ARMv8		     = Y
 Usage doc	     = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 0ff93363c7..4419952443 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -41,6 +41,8 @@ 
 #define ENETFEC_RAFL_V			0x8
 #define ENETFEC_OPD_V			0xFFF0
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD		0
 #define NUM_OF_QUEUES			6
 
 uint32_t e_cntl;
@@ -179,6 +181,40 @@  enetfec_restart(struct rte_eth_dev *dev)
 	rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i, q;
+	struct rte_mbuf *mbuf;
+	struct bufdesc  *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	struct enetfec_priv_tx_q *txq;
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		rxq = fep->rx_queues[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			mbuf = rxq->rx_mbuf[i];
+			rxq->rx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+			bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		txq = fep->tx_queues[q];
+		bdp = txq->bd.base;
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			mbuf = txq->tx_mbuf[i];
+			txq->tx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
 static int
 enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
 {
@@ -192,6 +228,8 @@  static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
 	enetfec_restart(dev);
+	dev->rx_pkt_burst = &enetfec_recv_pkts;
+	dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
 	return 0;
 }
@@ -226,6 +264,110 @@  enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_close(__rte_unused struct rte_eth_dev *dev)
+{
+	enet_free_buffers(dev);
+	return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+			int wait_to_complete __rte_unused)
+{
+	struct rte_eth_link link;
+	unsigned int lstatus = 1;
+
+	if (dev == NULL) {
+		ENETFEC_PMD_ERR("Invalid device in link_update.\n");
+		return 0;
+	}
+
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
+	link.link_status = lstatus;
+	link.link_speed = ETH_SPEED_NUM_1G;
+
+	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+			 "Up");
+
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	uint32_t tmp;
+
+	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+	tmp |= 0x8;
+	tmp &= ~0x2;
+	rte_write32(rte_cpu_to_le_32(tmp),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+	return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+	dev->data->all_multicast = 1;
+
+	rte_write32(rte_cpu_to_le_32(0x04400002),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0x10800049),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+	return 0;
+}
+
+/* Set a MAC change in hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+		    struct rte_ether_addr *addr)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+	return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+	      struct rte_eth_stats *stats)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &fep->stats;
+
+	if (stats == NULL)
+		return -1;
+
+	memset(stats, 0, sizeof(struct rte_eth_stats));
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->ierrors = eth_stats->ierrors;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+	stats->oerrors = eth_stats->oerrors;
+
+	return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
@@ -237,6 +379,18 @@  enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		rte_free(fep->tx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
 	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -442,6 +596,12 @@  static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure          = enetfec_eth_configure,
 	.dev_start              = enetfec_eth_start,
 	.dev_stop               = enetfec_eth_stop,
+	.dev_close              = enetfec_eth_close,
+	.link_update            = enetfec_eth_link_update,
+	.promiscuous_enable     = enetfec_promiscuous_enable,
+	.allmulticast_enable    = enetfec_multicast_enable,
+	.mac_addr_set           = enetfec_set_mac_address,
+	.stats_get              = enetfec_stats_get,
 	.dev_infos_get          = enetfec_eth_info,
 	.rx_queue_setup         = enetfec_rx_queue_setup,
 	.tx_queue_setup         = enetfec_tx_queue_setup
@@ -468,6 +628,7 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 	int rc;
 	int i;
 	unsigned int bdsize;
+	struct rte_ether_addr macaddr;
 
 	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
@@ -510,6 +671,27 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
 	}
 
+	/* Copy the station address into the dev structure. */
+	dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+			ETHER_ADDR_LEN);
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/*
+	 * Set default mac address
+	 */
+	macaddr.addr_bytes[0] = 1;
+	macaddr.addr_bytes[1] = 1;
+	macaddr.addr_bytes[2] = 1;
+	macaddr.addr_bytes[3] = 1;
+	macaddr.addr_bytes[4] = 1;
+	macaddr.addr_bytes[5] = 1;
+	enetfec_set_mac_address(dev, &macaddr);
+
+	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
 	rc = enetfec_eth_init(dev);
 	if (rc)
 		goto failed_init;
@@ -518,6 +700,8 @@  pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
 	ENETFEC_PMD_ERR("Failed to init");
+err:
+	rte_eth_dev_release_port(dev);
 	return rc;
 }
 
@@ -525,6 +709,8 @@  static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_eth_dev *eth_dev = NULL;
+	struct enetfec_private *fep;
+	struct enetfec_priv_rx_q *rxq;
 	int ret;
 
 	/* find the ethdev entry */
@@ -532,11 +718,22 @@  pmd_enetfec_remove(struct rte_vdev_device *vdev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	fep = eth_dev->data->dev_private;
+	/* Free descriptor base of first RX queue as it was configured
+	 * first in enetfec_eth_init().
+	 */
+	rxq = fep->rx_queues[0];
+	rte_free(rxq->bd.base);
+	enet_free_queue(eth_dev);
+	enetfec_eth_stop(eth_dev);
+
 	ret = rte_eth_dev_release_port(eth_dev);
 	if (ret != 0)
 		return -EINVAL;
 
 	ENETFEC_PMD_INFO("Closing sw device");
+	munmap(fep->hw_baseaddr_v, fep->cbus_size);
+
 	return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..445fa97e77
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,445 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+#define ENETFEC_LOOPBACK	0
+#define ENETFEC_DUMP		0
+
+#if ENETFEC_DUMP
+static void
+enet_dump(struct enetfec_priv_tx_q *txq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENETFEC_PMD_DEBUG("TX ring dump\n");
+	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	bdp = txq->bd.base;
+	do {
+		ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == txq->bd.cur ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+			rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+			txq->tx_mbuf[index]);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+		index++;
+	} while (bdp != txq->bd.base);
+}
+
+static void
+enet_dump_rx(struct enetfec_priv_rx_q *rxq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENETFEC_PMD_DEBUG("RX ring dump\n");
+	ENETFEC_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	bdp = rxq->bd.base;
+	do {
+		ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == rxq->bd.cur ? 'S' : ' ',
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+			rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+			rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+			rxq->rx_mbuf[index]);
+		rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
+				rxq->rx_mbuf[index]->pkt_len);
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		index++;
+	} while (bdp != rxq->bd.base);
+}
+#endif
+
+#if ENETFEC_LOOPBACK
+static volatile bool lb_quit;
+
+static void fec_signal_handler(int signum)
+{
+	if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
+		printf("\n\n %s: Signal %d received, preparing to exit...\n",
+				__func__, signum);
+		lb_quit = true;
+	}
+}
+
+static void
+enetfec_lb_rxtx(void *rxq1)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
+	struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len = 0;
+	int index_r = 0, index_t = 0;
+	u8 *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	unsigned int i;
+	struct enetfec_private *fep;
+	struct enetfec_priv_tx_q *txq;
+	fep = rxq->fep->dev->data->dev_private;
+	txq = fep->tx_queues[0];
+
+	pool = rxq->pool;
+	rx_bdp = rxq->bd.cur;
+	tx_bdp = txq->bd.cur;
+
+	signal(SIGTSTP, fec_signal_handler);
+	while (!lb_quit) {
+chk_again:
+		status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
+		if (status & RX_BD_EMPTY) {
+			if (!lb_quit)
+				goto chk_again;
+			rxq->bd.cur = rx_bdp;
+			txq->bd.cur = tx_bdp;
+			return;
+		}
+
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				ENETFEC_PMD_ERR("rx_fifo_error\n");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENETFEC_PMD_ERR("rx_length_error\n");
+				if (status & RX_BD_LAST)
+					ENETFEC_PMD_ERR("rcv is not +last\n");
+			}
+			/* CRC Error */
+			if (status & RX_BD_CR)
+				ENETFEC_PMD_ERR("rx_crc_errors\n");
+
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_PMD_ERR("rx_frame_error\n");
+			mbuf = NULL;
+			goto rx_processing_done;
+		}
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(!new_mbuf)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Process the incoming frame. */
+		pkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));
+
+		/* shows data with respect to the data_off field. */
+		index_r = enet_get_bd_index(rx_bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index_r];
+
+		/* adjust pkt_len */
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf, pkt_len - 4);
+		if (rxq->fep->quirks & QUIRK_RACC)
+			rte_pktmbuf_adj(mbuf, 2);
+
+		/* Replace Buffer in BD */
+		rxq->rx_mbuf[index_r] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&rx_bdp->bd_bufaddr);
+
+rx_processing_done:
+		/* when rx_processing_done clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		rx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+
+		/* TX begins: First clean the ring then process packet */
+		index_t = enet_get_bd_index(tx_bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index_t]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index_t]);
+			txq->tx_mbuf[index_t] = NULL;
+		}
+
+		if (mbuf == NULL)
+			continue;
+
+		/* Fill in a Tx ring entry */
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+
+		for (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&tx_bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);
+
+		/* Make sure the updates to rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+
+		/* Save mbuf pointer to clean later */
+		txq->tx_mbuf[index_t] = mbuf;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		tx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);
+	}
+}
+#endif
+
+/* This function does enetfec_rx_queue processing. Dequeue packets from the
+ * Rx queue; when updating through the ring, just set the empty indicator.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *bdp;
+	struct rte_mbuf *mbuf, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len;
+	int pkt_received = 0, index = 0;
+	void *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	pool = rxq->pool;
+	bdp = rxq->bd.cur;
+#if ENETFEC_LOOPBACK
+	enetfec_lb_rxtx(rxq1);
+#endif
+	/* Process the incoming packet */
+	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	while ((status & RX_BD_EMPTY) == 0) {
+		if (pkt_received >= nb_pkts)
+			break;
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(new_mbuf == NULL)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				/* enet_dump_rx(rxq); */
+				ENETFEC_PMD_ERR("rx_fifo_error\n");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENETFEC_PMD_ERR("rx_length_error\n");
+				if (status & RX_BD_LAST)
+					ENETFEC_PMD_ERR("rcv is not +last\n");
+			}
+			if (status & RX_BD_CR) {     /* CRC Error */
+				ENETFEC_PMD_ERR("rx_crc_errors\n");
+			}
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_PMD_ERR("rx_frame_error\n");
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		stats->ipackets++;
+		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+		stats->ibytes += pkt_len;
+
+		/* shows data with respect to the data_off field. */
+		index = enet_get_bd_index(bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index];
+
+		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		rte_prefetch0(data);
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+				pkt_len - 4);
+
+		if (rxq->fep->quirks & QUIRK_RACC)
+			data = rte_pktmbuf_adj(mbuf, 2);
+
+		rx_pkts[pkt_received] = mbuf;
+		pkt_received++;
+		rxq->rx_mbuf[index] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&bdp->bd_bufaddr);
+rx_processing_done:
+		/* when rx_processing_done clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		if (rxq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+				    &ebdp->bd_esc);
+			rte_write32(0, &ebdp->bd_prot);
+			rte_write32(0, &ebdp->bd_bdu);
+		}
+
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	}
+	rxq->bd.cur = bdp;
+	return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct enetfec_priv_tx_q *txq  =
+			(struct enetfec_priv_tx_q *)tx_queue;
+	struct rte_eth_stats *stats = &txq->fep->stats;
+	struct bufdesc *bdp, *last_bdp;
+	struct rte_mbuf *mbuf;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int index, estatus = 0;
+	unsigned int i, pkt_transmitted = 0;
+	u8 *data;
+	int tx_st = 1;
+
+	while (tx_st) {
+		if (pkt_transmitted >= nb_pkts) {
+			tx_st = 0;
+			break;
+		}
+		bdp = txq->bd.cur;
+		/* First clean the ring */
+		index = enet_get_bd_index(bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index]);
+			txq->tx_mbuf[index] = NULL;
+		}
+
+		mbuf = *(tx_pkts);
+		tx_pkts++;
+
+		/* Fill in a Tx ring entry */
+		last_bdp = bdp;
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		buflen = rte_pktmbuf_pkt_len(mbuf);
+		stats->opackets++;
+		stats->obytes += buflen;
+
+		if (mbuf->nb_segs > 1) {
+			ENETFEC_PMD_DEBUG("SG not supported");
+			return -1;
+		}
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			    &bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+		if (txq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(0, &ebdp->bd_bdu);
+			rte_write32(rte_cpu_to_le_32(estatus),
+				    &ebdp->bd_esc);
+		}
+
+		index = enet_get_bd_index(last_bdp, &txq->bd);
+		/* Save mbuf pointer */
+		txq->tx_mbuf[index] = mbuf;
+
+		/* Make sure the updates to rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+		pkt_transmitted++;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+		/* Make sure the updates to bdp and tx_mbuf are performed
+		 * before txq->bd.cur.
+		 */
+		txq->bd.cur = bdp;
+	}
+	return nb_pkts;
+}