[3/4] drivers/net/enetfec: queue configuration

Message ID: 20210430043424.19752-4-apeksha.gupta@nxp.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Andrew Rybchenko
Series: drivers/net: add NXP ENETFEC driver

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

Apeksha Gupta April 30, 2021, 4:34 a.m. UTC
  This patch added RX/TX queue configuration setup operations.
On packet Rx the respective BD Ring status bit is set which is then
used for packet processing.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
 1 file changed, 223 insertions(+)
  

Comments

Andrew Rybchenko June 8, 2021, 1:38 p.m. UTC | #1
Summary is incorrect.
"net/enetfec: support queue configuration" ?

On 4/30/21 7:34 AM, Apeksha Gupta wrote:
> This patch added RX/TX queue configuration setup operations.

RX -> Rx, TX -> Tx

> On packet Rx the respective BD Ring status bit is set which is then
> used for packet processing.
> 
> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> ---
>  drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
>  1 file changed, 223 insertions(+)
> 
> diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
> index 5f4f2cf9e..b4816179a 100644
> --- a/drivers/net/enetfec/enet_ethdev.c
> +++ b/drivers/net/enetfec/enet_ethdev.c
> @@ -48,6 +48,19 @@
>  
>  int enetfec_logtype_pmd;
>  
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_VLAN_STRIP |
> +		DEV_RX_OFFLOAD_CHECKSUM;
> +
> +static uint64_t dev_tx_offloads_sup =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM;
> +
>  /*
>   * This function is called to start or restart the FEC during a link
>   * change, transmit timeout or to reconfigure the FEC. The network
> @@ -176,8 +189,218 @@ enetfec_eth_open(struct rte_eth_dev *dev)
>  	return 0;
>  }
>  
> +
> +static int
> +enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
> +{
> +	ENET_PMD_INFO("%s: returning zero ", __func__);
> +	return 0;
> +}
> +
> +static int
> +enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
> +	     struct rte_eth_dev_info *dev_info)
> +{
> +	dev_info->max_rx_queues = ENET_MAX_Q;
> +	dev_info->max_tx_queues = ENET_MAX_Q;
> +	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

max_mtu?
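
For example, a minimal way to report it (just a sketch; assuming the
largest usable frame is bounded by PKT_MAX_BUF_SIZE):

	dev_info->max_mtu = PKT_MAX_BUF_SIZE - RTE_ETHER_HDR_LEN -
			    RTE_ETHER_CRC_LEN;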

> +	dev_info->rx_offload_capa = dev_rx_offloads_sup;
> +	dev_info->tx_offload_capa = dev_tx_offloads_sup;
> +
> +	return 0;
> +}
> +
> +static const unsigned short offset_des_active_rxq[] = {
> +	ENET_RDAR_0, ENET_RDAR_1, ENET_RDAR_2
> +};
> +
> +static const unsigned short offset_des_active_txq[] = {
> +	ENET_TDAR_0, ENET_TDAR_1, ENET_TDAR_2
> +};
> +
> +static int
> +enetfec_tx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_desc,
> +			__rte_unused unsigned int socket_id,
> +			__rte_unused const struct rte_eth_txconf *tx_conf)

It is incorrect to silently ignore tx_conf. Not everything in
it has corresponding capabilities to be advertised by the driver
and checked by ethdev, so you need to check that unsupported
configuration is not requested, e.g. deferred start.
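
Something like the following (untested sketch) at the top of the
function, together with dropping __rte_unused from tx_conf:

	if (tx_conf->tx_deferred_start) {
		ENET_PMD_ERR("%s: deferred start not supported", __func__);
		return -ENOTSUP;
	}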

> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bdp, *bd_base;
> +	struct enetfec_priv_tx_q *txq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate transmit queue */
> +	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
> +	if (!txq) {

Compare vs NULL
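
Something like:

	if (txq == NULL) {
		ENET_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}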

> +		ENET_PMD_ERR("transmit queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_desc > MAX_TX_BD_RING_SIZE) {
> +		nb_desc = MAX_TX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
> +	}
> +	txq->bd.ring_size = nb_desc;
> +	fep->total_tx_ring_size += txq->bd.ring_size;
> +	fep->tx_queues[queue_idx] = txq;
> +
> +	rte_write32(fep->bd_addr_p_t[queue_idx],
> +		fep->hw_baseaddr_v + ENET_TD_START(queue_idx));
> +
> +	/* Set transmit descriptor base. */
> +	txq = fep->tx_queues[queue_idx];
> +	txq->fep = fep;
> +	size = dsize * txq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
> +	txq->bd.que_id = queue_idx;
> +	txq->bd.base = bd_base;
> +	txq->bd.cur = bd_base;
> +	txq->bd.d_size = dsize;
> +	txq->bd.d_size_log2 = dsize_log2;
> +	txq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +	bdp = txq->bd.base;
> +	bdp = txq->bd.cur;
> +
> +	for (i = 0; i < txq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
> +		if (txq->tx_mbuf[i]) {

Compare vs NULL

> +			rte_pktmbuf_free(txq->tx_mbuf[i]);
> +			txq->tx_mbuf[i] = NULL;
> +		}
> +		rte_write32(rte_cpu_to_le_32(0), &bdp->bd_bufaddr);
> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &txq->bd);
> +	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	txq->dirty_tx = bdp;
> +	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
> +	return 0;
> +}
> +
> +static int
> +enetfec_rx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_rx_desc,
> +			 __rte_unused unsigned int socket_id,
> +			 __rte_unused const struct rte_eth_rxconf *rx_conf,
> +			struct rte_mempool *mb_pool)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bd_base;
> +	struct bufdesc  *bdp;
> +	struct enetfec_priv_rx_q *rxq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate receive queue */
> +	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
> +	if (!rxq) {

Compare vs NULL

> +		ENET_PMD_ERR("receive queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
> +		nb_rx_desc = MAX_RX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
> +	}
> +
> +	rxq->bd.ring_size = nb_rx_desc;
> +	fep->total_rx_ring_size += rxq->bd.ring_size;
> +	fep->rx_queues[queue_idx] = rxq;
> +
> +	rte_write32(fep->bd_addr_p_r[queue_idx],
> +			fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
> +	rte_write32(PKT_MAX_BUF_SIZE,
> +			fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));
> +
> +	/* Set receive descriptor base. */
> +	rxq = fep->rx_queues[queue_idx];
> +	rxq->pool = mb_pool;
> +	size = dsize * rxq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
> +	rxq->bd.que_id = queue_idx;
> +	rxq->bd.base = bd_base;
> +	rxq->bd.cur = bd_base;
> +	rxq->bd.d_size = dsize;
> +	rxq->bd.d_size_log2 = dsize_log2;
> +	rxq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +
> +	rxq->fep = fep;
> +	bdp = rxq->bd.base;
> +	rxq->bd.cur = bdp;
> +
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		/* Initialize Rx buffers from pktmbuf pool */
> +		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
> +		if (mbuf == NULL) {
> +			ENET_PMD_ERR("mbuf failed\n");
> +			goto err_alloc;
> +		}
> +
> +		/* Get the virtual address & physical address */
> +		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
> +				&bdp->bd_bufaddr);
> +
> +		rxq->rx_mbuf[i] = mbuf;
> +		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Initialize the receive buffer descriptors. */
> +	bdp = rxq->bd.cur;
> +	for (i = 0; i < rxq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		if (rte_read32(&bdp->bd_bufaddr))

Compare vs 0
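
i.e. something along the lines of:

	if (rte_read32(&bdp->bd_bufaddr) != 0)
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
	else
		rte_write16(0, &bdp->bd_sc);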

> +			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
> +				&bdp->bd_sc);
> +		else
> +			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &rxq->bd);
> +	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)),	&bdp->bd_sc);
> +	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
> +	rte_write32(0x0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
> +	return 0;
> +
> +err_alloc:
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		rte_pktmbuf_free(rxq->rx_mbuf[i]);
> +		rxq->rx_mbuf[i] = NULL;
> +	}
> +	rte_free(rxq);
> +	return -1;

Callback returns negative errno, not -1
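
i.e. end the error path with a proper errno, for example:

	rte_free(rxq);
	return -ENOMEM;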

> +}
> +
>  static const struct eth_dev_ops ops = {
>  	.dev_start = enetfec_eth_open,
> +	.dev_configure = enetfec_eth_configure,
> +	.dev_infos_get = enetfec_eth_info,
> +	.rx_queue_setup = enetfec_rx_queue_setup,
> +	.tx_queue_setup = enetfec_tx_queue_setup,

Fields in the structure should be initialized in the same order
as they are declared in eth_dev_ops, for consistency.
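
For instance (a sketch, assuming the current declaration order of
these callbacks in struct eth_dev_ops):

	static const struct eth_dev_ops ops = {
		.dev_configure = enetfec_eth_configure,
		.dev_start = enetfec_eth_open,
		.dev_infos_get = enetfec_eth_info,
		.rx_queue_setup = enetfec_rx_queue_setup,
		.tx_queue_setup = enetfec_tx_queue_setup,
	};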

>  };
>  
>  static int
>
  
Sachin Saxena (OSS) July 4, 2021, 6:46 a.m. UTC | #2
On 30-Apr-21 10:04 AM, Apeksha Gupta wrote:
> This patch added RX/TX queue configuration setup operations.
added -> adds
> On packet Rx the respective BD Ring status bit is set which is then
Suggestion:  Rx -> reception
> used for packet processing.
>
> Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
> Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
> ---
>   drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
>   1 file changed, 223 insertions(+)
>
> diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
> index 5f4f2cf9e..b4816179a 100644
> --- a/drivers/net/enetfec/enet_ethdev.c
> +++ b/drivers/net/enetfec/enet_ethdev.c
> @@ -48,6 +48,19 @@
>   
>   int enetfec_logtype_pmd;
>   
> +/* Supported Rx offloads */
> +static uint64_t dev_rx_offloads_sup =
> +		DEV_RX_OFFLOAD_IPV4_CKSUM |
> +		DEV_RX_OFFLOAD_UDP_CKSUM |
> +		DEV_RX_OFFLOAD_TCP_CKSUM |
> +		DEV_RX_OFFLOAD_VLAN_STRIP |
> +		DEV_RX_OFFLOAD_CHECKSUM;
> +
> +static uint64_t dev_tx_offloads_sup =
> +		DEV_TX_OFFLOAD_IPV4_CKSUM |
> +		DEV_TX_OFFLOAD_UDP_CKSUM |
> +		DEV_TX_OFFLOAD_TCP_CKSUM;
> +
>   /*
>    * This function is called to start or restart the FEC during a link
>    * change, transmit timeout or to reconfigure the FEC. The network
> @@ -176,8 +189,218 @@ enetfec_eth_open(struct rte_eth_dev *dev)
>   	return 0;
>   }
>   
> +
> +static int
> +enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
> +{
> +	ENET_PMD_INFO("%s: returning zero ", __func__);
> +	return 0;
> +}
> +
Remove this if not required.
> +static int
> +enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
> +	     struct rte_eth_dev_info *dev_info)
> +{
> +	dev_info->max_rx_queues = ENET_MAX_Q;
> +	dev_info->max_tx_queues = ENET_MAX_Q;
> +	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
> +	dev_info->rx_offload_capa = dev_rx_offloads_sup;
> +	dev_info->tx_offload_capa = dev_tx_offloads_sup;
> +
> +	return 0;
> +}
> +
> +static const unsigned short offset_des_active_rxq[] = {
> +	ENET_RDAR_0, ENET_RDAR_1, ENET_RDAR_2
> +};
> +
> +static const unsigned short offset_des_active_txq[] = {
> +	ENET_TDAR_0, ENET_TDAR_1, ENET_TDAR_2
> +};
> +
> +static int
> +enetfec_tx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_desc,
> +			__rte_unused unsigned int socket_id,
> +			__rte_unused const struct rte_eth_txconf *tx_conf)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bdp, *bd_base;
> +	struct enetfec_priv_tx_q *txq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate transmit queue */
> +	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
> +	if (!txq) {
> +		ENET_PMD_ERR("transmit queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_desc > MAX_TX_BD_RING_SIZE) {
> +		nb_desc = MAX_TX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
> +	}
> +	txq->bd.ring_size = nb_desc;
> +	fep->total_tx_ring_size += txq->bd.ring_size;
> +	fep->tx_queues[queue_idx] = txq;
> +
> +	rte_write32(fep->bd_addr_p_t[queue_idx],
> +		fep->hw_baseaddr_v + ENET_TD_START(queue_idx));
Do we need  rte_cpu_to_le_* ?
> +
> +	/* Set transmit descriptor base. */
> +	txq = fep->tx_queues[queue_idx];
> +	txq->fep = fep;
> +	size = dsize * txq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
> +	txq->bd.que_id = queue_idx;
> +	txq->bd.base = bd_base;
> +	txq->bd.cur = bd_base;
> +	txq->bd.d_size = dsize;
> +	txq->bd.d_size_log2 = dsize_log2;
> +	txq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +	bdp = txq->bd.base;
> +	bdp = txq->bd.cur;
> +
> +	for (i = 0; i < txq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

rte_cpu_to_le_16(0) has no effect on '0'.
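
i.e. simply:

	rte_write16(0, &bdp->bd_sc);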

> +		if (txq->tx_mbuf[i]) {
Compare vs NULL
> +			rte_pktmbuf_free(txq->tx_mbuf[i]);
> +			txq->tx_mbuf[i] = NULL;
> +		}
> +		rte_write32(rte_cpu_to_le_32(0), &bdp->bd_bufaddr);

rte_cpu_to_le_32 has no effect.

> +		bdp = enet_get_nextdesc(bdp, &txq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &txq->bd);
> +	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
> +	txq->dirty_tx = bdp;
> +	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
> +	return 0;
> +}
> +
> +static int
> +enetfec_rx_queue_setup(struct rte_eth_dev *dev,
> +			uint16_t queue_idx,
> +			uint16_t nb_rx_desc,
> +			 __rte_unused unsigned int socket_id,
> +			 __rte_unused const struct rte_eth_rxconf *rx_conf,
> +			struct rte_mempool *mb_pool)
> +{
> +	struct enetfec_private *fep = dev->data->dev_private;
> +	unsigned int i;
> +	struct bufdesc *bd_base;
> +	struct bufdesc  *bdp;
> +	struct enetfec_priv_rx_q *rxq;
> +	unsigned int size;
> +	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
> +			sizeof(struct bufdesc);
> +	unsigned int dsize_log2 = fls64(dsize);
> +
> +	/* allocate receive queue */
> +	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
> +	if (!rxq) {
> +		ENET_PMD_ERR("receive queue allocation failed");
> +		return -ENOMEM;
> +	}
> +
> +	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
> +		nb_rx_desc = MAX_RX_BD_RING_SIZE;
> +		ENET_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
> +	}
> +
> +	rxq->bd.ring_size = nb_rx_desc;
> +	fep->total_rx_ring_size += rxq->bd.ring_size;
> +	fep->rx_queues[queue_idx] = rxq;
> +
> +	rte_write32(fep->bd_addr_p_r[queue_idx],
> +			fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
> +	rte_write32(PKT_MAX_BUF_SIZE,
> +			fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));
Please check if we need  rte_cpu_to_le_* .
> +
> +	/* Set receive descriptor base. */
> +	rxq = fep->rx_queues[queue_idx];
> +	rxq->pool = mb_pool;
> +	size = dsize * rxq->bd.ring_size;
> +	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
> +	rxq->bd.que_id = queue_idx;
> +	rxq->bd.base = bd_base;
> +	rxq->bd.cur = bd_base;
> +	rxq->bd.d_size = dsize;
> +	rxq->bd.d_size_log2 = dsize_log2;
> +	rxq->bd.active_reg_desc =
> +			fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
> +	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
> +	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
> +
> +	rxq->fep = fep;
> +	bdp = rxq->bd.base;
> +	rxq->bd.cur = bdp;
> +
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		/* Initialize Rx buffers from pktmbuf pool */
> +		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
> +		if (mbuf == NULL) {
> +			ENET_PMD_ERR("mbuf failed\n");
> +			goto err_alloc;
> +		}
> +
> +		/* Get the virtual address & physical address */
> +		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
> +				&bdp->bd_bufaddr);
> +
> +		rxq->rx_mbuf[i] = mbuf;
> +		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Initialize the receive buffer descriptors. */
> +	bdp = rxq->bd.cur;
> +	for (i = 0; i < rxq->bd.ring_size; i++) {
> +		/* Initialize the BD for every fragment in the page. */
> +		if (rte_read32(&bdp->bd_bufaddr))
Compare vs 0
> +			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
> +				&bdp->bd_sc);
> +		else
> +			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
No effect of rte_cpu_to_le_16.
> +
> +		bdp = enet_get_nextdesc(bdp, &rxq->bd);
> +	}
> +
> +	/* Set the last buffer to wrap */
> +	bdp = enet_get_prevdesc(bdp, &rxq->bd);
> +	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
> +		     rte_read16(&bdp->bd_sc)),	&bdp->bd_sc);
> +	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
> +	rte_write32(0x0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
> +	return 0;
> +
> +err_alloc:
> +	for (i = 0; i < nb_rx_desc; i++) {
> +		rte_pktmbuf_free(rxq->rx_mbuf[i]);
Only free if the mbuf was allocated earlier (rxq->rx_mbuf[i] != NULL).
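
Something like:

	if (rxq->rx_mbuf[i] != NULL) {
		rte_pktmbuf_free(rxq->rx_mbuf[i]);
		rxq->rx_mbuf[i] = NULL;
	}
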
> +		rxq->rx_mbuf[i] = NULL;
> +	}
> +	rte_free(rxq);
> +	return -1;
> +}
> +
>   static const struct eth_dev_ops ops = {
>   	.dev_start = enetfec_eth_open,
> +	.dev_configure = enetfec_eth_configure,
Better to introduce function pointers later, when they are actually implemented.

> +	.dev_infos_get = enetfec_eth_info,
> +	.rx_queue_setup = enetfec_rx_queue_setup,
> +	.tx_queue_setup = enetfec_tx_queue_setup,
>   };
>   
>   static int
  
