[v5,3/6] examples/ioat: add rawdev copy mode support

Message ID 20190920073714.1314-4-marcinx.baran@intel.com (mailing list archive)
State Superseded, archived
Series examples/ioat: sample app on ioat driver usage

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Marcin Baran Sept. 20, 2019, 7:37 a.m. UTC
Added support for copying packets using the rawdev device.
Each port's Rx queue is assigned a DMA channel for the copy.

Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
Signed-off-by: Pawel Modrak <pawelx.modrak@intel.com>
---
 examples/ioat/ioatfwd.c | 236 ++++++++++++++++++++++++++++++++--------
 1 file changed, 189 insertions(+), 47 deletions(-)
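
For reference, the hardware copy path added here follows the usual enqueue/kick/complete pattern of the IOAT rawdev API. The sketch below is illustrative only: it assumes dev_id refers to an already configured and started IOAT rawdev and that pkt_copy comes from the same mempool as the source packet, and it copies only the packet data, whereas the patch also copies a slice of the mbuf metadata (from rearm_data onward) by offsetting buf_iova.

#include <rte_ioat_rawdev.h>
#include <rte_mbuf.h>

/* Sketch: enqueue one packet copy and kick it off (see assumptions above). */
static int
hw_copy_one(uint16_t dev_id, struct rte_mbuf *pkt, struct rte_mbuf *pkt_copy)
{
	/* Enqueue a copy of the packet data; the mbuf pointers are passed
	 * as handles so they can be retrieved again on completion. */
	int ret = rte_ioat_enqueue_copy(dev_id,
		pkt->buf_iova + pkt->data_off,           /* source */
		pkt_copy->buf_iova + pkt_copy->data_off, /* destination */
		rte_pktmbuf_data_len(pkt),               /* length */
		(uintptr_t)pkt,
		(uintptr_t)pkt_copy,
		0 /* no fence */);
	if (ret != 1)
		return -1;

	rte_ioat_do_copies(dev_id); /* start the queued copy */
	return 0;
}

The completed copies are later collected on the TX side with rte_ioat_completed_copies(), which hands back the source and destination handles, as done in ioat_tx_port() in the patch below.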
  

Comments

Bruce Richardson Sept. 27, 2019, 10:05 a.m. UTC | #1
On Fri, Sep 20, 2019 at 09:37:11AM +0200, Marcin Baran wrote:
> Added support for copying packets using
> rawdev device. Each port's Rx queue is
> assigned DMA channel for copy.
> 
> Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
> Signed-off-by: Pawel Modrak <pawelx.modrak@intel.com>
> ---
>  examples/ioat/ioatfwd.c | 236 ++++++++++++++++++++++++++++++++--------
>  1 file changed, 189 insertions(+), 47 deletions(-)
> 
> diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
> index 3a092c6cf..c66ce7e49 100644
> --- a/examples/ioat/ioatfwd.c
> +++ b/examples/ioat/ioatfwd.c
> @@ -121,6 +121,50 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
>  		rte_pktmbuf_mtod(src, char *), src->data_len);
>  }
>  
> +static uint32_t
> +ioat_enqueue_packets(struct rte_mbuf **pkts,
> +	uint32_t nb_rx, uint16_t dev_id)
> +{
> +	int ret;
> +	uint32_t i;
> +	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
> +
> +	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
> +		&pkts[0]->rearm_data);
> +
> +	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +		(void *)pkts_copy, nb_rx);
> +
> +	if (unlikely(ret < 0))
> +		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
> +
> +	for (i = 0; i < nb_rx; i++) {
> +		/* Perform data copy */
> +		ret = rte_ioat_enqueue_copy(dev_id,
> +			pkts[i]->buf_iova
> +			- addr_offset,
> +			pkts_copy[i]->buf_iova
> +			- addr_offset,
> +			rte_pktmbuf_data_len(pkts[i])
> +			+ addr_offset,
> +			(uintptr_t)pkts[i],
> +			(uintptr_t)pkts_copy[i],
> +			0 /* nofence */);
> +
> +		if (ret != 1)
> +			break;
> +	}
> +
> +	ret = i;
> +	/* Free any not enqueued packets. */
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
> +		nb_rx - i);
> +
> +
> +	return ret;
> +}
> +
>  /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
>  static void
>  ioat_rx_port(struct rxtx_port_config *rx_config)
> @@ -136,32 +180,40 @@ ioat_rx_port(struct rxtx_port_config *rx_config)
>  		if (nb_rx == 0)
>  			continue;
>  
> -		/* Perform packet software copy, free source packets */
> -		int ret;
> -		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> -
> -		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst_copy, nb_rx);
> -
> -		if (unlikely(ret < 0))
> -			rte_exit(EXIT_FAILURE,
> -				"Unable to allocate memory.\n");
> -
> -		for (j = 0; j < nb_rx; j++)
> -			pktmbuf_sw_copy(pkts_burst[j],
> -				pkts_burst_copy[j]);
> -
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst, nb_rx);
> -
> -		nb_enq = rte_ring_enqueue_burst(
> -			rx_config->rx_to_tx_ring,
> -			(void *)pkts_burst_copy, nb_rx, NULL);
> -
> -		/* Free any not enqueued packets. */
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)&pkts_burst_copy[nb_enq],
> -			nb_rx - nb_enq);
> +		if (copy_mode == COPY_MODE_IOAT_NUM) {
> +			/* Perform packet hardware copy */
> +			nb_enq = ioat_enqueue_packets(pkts_burst,
> +				nb_rx, rx_config->ioat_ids[i]);
> +			if (nb_enq > 0)
> +				rte_ioat_do_copies(rx_config->ioat_ids[i]);
> +		} else {
> +			/* Perform packet software copy, free source packets */
> +			int ret;
> +			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> +
> +			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst_copy, nb_rx);
> +
> +			if (unlikely(ret < 0))
> +				rte_exit(EXIT_FAILURE,
> +					"Unable to allocate memory.\n");
> +
> +			for (j = 0; j < nb_rx; j++)
> +				pktmbuf_sw_copy(pkts_burst[j],
> +					pkts_burst_copy[j]);
> +
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst, nb_rx);
> +
> +			nb_enq = rte_ring_enqueue_burst(
> +				rx_config->rx_to_tx_ring,
> +				(void *)pkts_burst_copy, nb_rx, NULL);
> +
> +			/* Free any not enqueued packets. */
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)&pkts_burst_copy[nb_enq],
> +				nb_rx - nb_enq);
> +		}

Would the diff in this patch be smaller if you switched the order of the
branches so that the SW copy leg, which was added first, was processed
first? You could even add in a dummy branch in patch 2, so that the
indentation for that section remains unchanged.

/Bruce
  
Marcin Baran Sept. 27, 2019, 2:03 p.m. UTC | #2
-----Original Message-----
From: Bruce Richardson <bruce.richardson@intel.com> 
Sent: Friday, September 27, 2019 12:06 PM
To: Baran, MarcinX <marcinx.baran@intel.com>
Cc: dev@dpdk.org; Modrak, PawelX <pawelx.modrak@intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 3/6] examples/ioat: add rawdev copy mode support

On Fri, Sep 20, 2019 at 09:37:11AM +0200, Marcin Baran wrote:
> Added support for copying packets using rawdev device. Each port's Rx 
> queue is assigned DMA channel for copy.
> 
> Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
> Signed-off-by: Pawel Modrak <pawelx.modrak@intel.com>
> ---
>  examples/ioat/ioatfwd.c | 236 ++++++++++++++++++++++++++++++++--------
>  1 file changed, 189 insertions(+), 47 deletions(-)
> 
> diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
> index 3a092c6cf..c66ce7e49 100644
> --- a/examples/ioat/ioatfwd.c
> +++ b/examples/ioat/ioatfwd.c
> @@ -121,6 +121,50 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
>  		rte_pktmbuf_mtod(src, char *), src->data_len);
>  }
>  
> +static uint32_t
> +ioat_enqueue_packets(struct rte_mbuf **pkts,
> +	uint32_t nb_rx, uint16_t dev_id)
> +{
> +	int ret;
> +	uint32_t i;
> +	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
> +
> +	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
> +		&pkts[0]->rearm_data);
> +
> +	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +		(void *)pkts_copy, nb_rx);
> +
> +	if (unlikely(ret < 0))
> +		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
> +
> +	for (i = 0; i < nb_rx; i++) {
> +		/* Perform data copy */
> +		ret = rte_ioat_enqueue_copy(dev_id,
> +			pkts[i]->buf_iova
> +			- addr_offset,
> +			pkts_copy[i]->buf_iova
> +			- addr_offset,
> +			rte_pktmbuf_data_len(pkts[i])
> +			+ addr_offset,
> +			(uintptr_t)pkts[i],
> +			(uintptr_t)pkts_copy[i],
> +			0 /* nofence */);
> +
> +		if (ret != 1)
> +			break;
> +	}
> +
> +	ret = i;
> +	/* Free any not enqueued packets. */
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
> +		nb_rx - i);
> +
> +
> +	return ret;
> +}
> +
>  /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
>  static void
>  ioat_rx_port(struct rxtx_port_config *rx_config)
> @@ -136,32 +180,40 @@ ioat_rx_port(struct rxtx_port_config *rx_config)
>  		if (nb_rx == 0)
>  			continue;
>  
> -		/* Perform packet software copy, free source packets */
> -		int ret;
> -		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> -
> -		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst_copy, nb_rx);
> -
> -		if (unlikely(ret < 0))
> -			rte_exit(EXIT_FAILURE,
> -				"Unable to allocate memory.\n");
> -
> -		for (j = 0; j < nb_rx; j++)
> -			pktmbuf_sw_copy(pkts_burst[j],
> -				pkts_burst_copy[j]);
> -
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst, nb_rx);
> -
> -		nb_enq = rte_ring_enqueue_burst(
> -			rx_config->rx_to_tx_ring,
> -			(void *)pkts_burst_copy, nb_rx, NULL);
> -
> -		/* Free any not enqueued packets. */
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)&pkts_burst_copy[nb_enq],
> -			nb_rx - nb_enq);
> +		if (copy_mode == COPY_MODE_IOAT_NUM) {
> +			/* Perform packet hardware copy */
> +			nb_enq = ioat_enqueue_packets(pkts_burst,
> +				nb_rx, rx_config->ioat_ids[i]);
> +			if (nb_enq > 0)
> +				rte_ioat_do_copies(rx_config->ioat_ids[i]);
> +		} else {
> +			/* Perform packet software copy, free source packets */
> +			int ret;
> +			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> +
> +			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst_copy, nb_rx);
> +
> +			if (unlikely(ret < 0))
> +				rte_exit(EXIT_FAILURE,
> +					"Unable to allocate memory.\n");
> +
> +			for (j = 0; j < nb_rx; j++)
> +				pktmbuf_sw_copy(pkts_burst[j],
> +					pkts_burst_copy[j]);
> +
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst, nb_rx);
> +
> +			nb_enq = rte_ring_enqueue_burst(
> +				rx_config->rx_to_tx_ring,
> +				(void *)pkts_burst_copy, nb_rx, NULL);
> +
> +			/* Free any not enqueued packets. */
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)&pkts_burst_copy[nb_enq],
> +				nb_rx - nb_enq);
> +		}

Would the diff in this patch be smaller if you switched the order of the branches so that the SW copy leg, which was added first, was processed first? You could even add in a dummy branch in patch 2, so that the indentation for that section remains unchanged.

/Bruce
[Marcin] Switched the order and added a dummy branch in patch 2. Also changed the ioat_tx_port() function the same way in v6.
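
For illustration, a dummy branch in patch 2 along the lines Bruce suggests might look roughly like this (a hypothetical sketch of the patch-2 shape, not the actual v6 hunk); the software-copy block is then already at its final indentation, so this patch only has to fill in the empty branch:

	if (copy_mode == COPY_MODE_IOAT_NUM) {
		/* Hardware copy path, filled in by the rawdev copy patch. */
	} else {
		/* Perform packet software copy, free source packets */
		for (j = 0; j < nb_rx; j++)
			pktmbuf_sw_copy(pkts_burst[j], pkts_burst_copy[j]);

		rte_mempool_put_bulk(ioat_pktmbuf_pool,
			(void *)pkts_burst, nb_rx);

		nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
			(void *)pkts_burst_copy, nb_rx, NULL);
	}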
  

Patch

diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 3a092c6cf..c66ce7e49 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -121,6 +121,50 @@  pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
 		rte_pktmbuf_mtod(src, char *), src->data_len);
 }
 
+static uint32_t
+ioat_enqueue_packets(struct rte_mbuf **pkts,
+	uint32_t nb_rx, uint16_t dev_id)
+{
+	int ret;
+	uint32_t i;
+	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
+
+	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
+		&pkts[0]->rearm_data);
+
+	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+		(void *)pkts_copy, nb_rx);
+
+	if (unlikely(ret < 0))
+		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
+
+	for (i = 0; i < nb_rx; i++) {
+		/* Perform data copy */
+		ret = rte_ioat_enqueue_copy(dev_id,
+			pkts[i]->buf_iova
+			- addr_offset,
+			pkts_copy[i]->buf_iova
+			- addr_offset,
+			rte_pktmbuf_data_len(pkts[i])
+			+ addr_offset,
+			(uintptr_t)pkts[i],
+			(uintptr_t)pkts_copy[i],
+			0 /* nofence */);
+
+		if (ret != 1)
+			break;
+	}
+
+	ret = i;
+	/* Free any not enqueued packets. */
+	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
+	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
+		nb_rx - i);
+
+
+	return ret;
+}
+
 /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
 static void
 ioat_rx_port(struct rxtx_port_config *rx_config)
@@ -136,32 +180,40 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 		if (nb_rx == 0)
 			continue;
 
-		/* Perform packet software copy, free source packets */
-		int ret;
-		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
-
-		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
-			(void *)pkts_burst_copy, nb_rx);
-
-		if (unlikely(ret < 0))
-			rte_exit(EXIT_FAILURE,
-				"Unable to allocate memory.\n");
-
-		for (j = 0; j < nb_rx; j++)
-			pktmbuf_sw_copy(pkts_burst[j],
-				pkts_burst_copy[j]);
-
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-			(void *)pkts_burst, nb_rx);
-
-		nb_enq = rte_ring_enqueue_burst(
-			rx_config->rx_to_tx_ring,
-			(void *)pkts_burst_copy, nb_rx, NULL);
-
-		/* Free any not enqueued packets. */
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-			(void *)&pkts_burst_copy[nb_enq],
-			nb_rx - nb_enq);
+		if (copy_mode == COPY_MODE_IOAT_NUM) {
+			/* Perform packet hardware copy */
+			nb_enq = ioat_enqueue_packets(pkts_burst,
+				nb_rx, rx_config->ioat_ids[i]);
+			if (nb_enq > 0)
+				rte_ioat_do_copies(rx_config->ioat_ids[i]);
+		} else {
+			/* Perform packet software copy, free source packets */
+			int ret;
+			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
+
+			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+				(void *)pkts_burst_copy, nb_rx);
+
+			if (unlikely(ret < 0))
+				rte_exit(EXIT_FAILURE,
+					"Unable to allocate memory.\n");
+
+			for (j = 0; j < nb_rx; j++)
+				pktmbuf_sw_copy(pkts_burst[j],
+					pkts_burst_copy[j]);
+
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)pkts_burst, nb_rx);
+
+			nb_enq = rte_ring_enqueue_burst(
+				rx_config->rx_to_tx_ring,
+				(void *)pkts_burst_copy, nb_rx, NULL);
+
+			/* Free any not enqueued packets. */
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&pkts_burst_copy[nb_enq],
+				nb_rx - nb_enq);
+		}
 	}
 }
 
@@ -169,31 +221,64 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 static void
 ioat_tx_port(struct rxtx_port_config *tx_config)
 {
-	uint32_t i, nb_dq = 0;
+	uint32_t i, j, nb_dq = 0;
+	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
 	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
 
-	/* Deque the mbufs from rx_to_tx_ring. */
-	nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
-		(void *)mbufs_dst, MAX_PKT_BURST, NULL);
+	if (copy_mode == COPY_MODE_IOAT_NUM) {
+		/* Deque the mbufs from IOAT device. */
+		for (i = 0; i < tx_config->nb_queues; i++) {
+			nb_dq = rte_ioat_completed_copies(
+				tx_config->ioat_ids[i], MAX_PKT_BURST,
+				(void *)mbufs_src, (void *)mbufs_dst);
 
-	if (nb_dq == 0)
-		return;
+			if (nb_dq == 0)
+				break;
 
-	/* Update macs if enabled */
-	if (mac_updating) {
-		for (i = 0; i < nb_dq; i++)
-			update_mac_addrs(mbufs_dst[i],
-				tx_config->rxtx_port);
-	}
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)mbufs_src, nb_dq);
 
-	const uint16_t nb_tx = rte_eth_tx_burst(tx_config->rxtx_port,
-		0, (void *)mbufs_dst, nb_dq);
+			/* Update macs if enabled */
+			if (mac_updating) {
+				for (j = 0; j < nb_dq; j++)
+					update_mac_addrs(mbufs_dst[j],
+						tx_config->rxtx_port);
+			}
+
+			const uint16_t nb_tx = rte_eth_tx_burst(
+				tx_config->rxtx_port, 0,
+				(void *)mbufs_dst, nb_dq);
 
-	/* Free any unsent packets. */
-	if (unlikely(nb_tx < nb_dq))
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-		(void *)&mbufs_dst[nb_tx],
-			nb_dq - nb_tx);
+			/* Free any unsent packets. */
+			if (unlikely(nb_tx < nb_dq))
+				rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&mbufs_dst[nb_tx],
+					nb_dq - nb_tx);
+		}
+	} else {
+		/* Deque the mbufs from rx_to_tx_ring. */
+		nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
+			(void *)mbufs_dst, MAX_PKT_BURST, NULL);
+
+		if (nb_dq == 0)
+			return;
+
+		/* Update macs if enabled */
+		if (mac_updating) {
+			for (i = 0; i < nb_dq; i++)
+				update_mac_addrs(mbufs_dst[i],
+					tx_config->rxtx_port);
+		}
+
+		const uint16_t nb_tx = rte_eth_tx_burst(tx_config->rxtx_port,
+			0, (void *)mbufs_dst, nb_dq);
+
+		/* Free any unsent packets. */
+		if (unlikely(nb_tx < nb_dq))
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			(void *)&mbufs_dst[nb_tx],
+				nb_dq - nb_tx);
+	}
 }
 
 /* Main rx and tx loop if only one slave lcore available */
@@ -389,6 +474,52 @@  check_link_status(uint32_t port_mask)
 	return retval;
 }
 
+static void
+configure_rawdev_queue(uint32_t dev_id)
+{
+	struct rte_ioat_rawdev_config dev_config = { .ring_size = ring_size };
+	struct rte_rawdev_info info = { .dev_private = &dev_config };
+
+	if (rte_rawdev_configure(dev_id, &info) != 0) {
+		rte_exit(EXIT_FAILURE,
+			"Error with rte_rawdev_configure()\n");
+	}
+	if (rte_rawdev_start(dev_id) != 0) {
+		rte_exit(EXIT_FAILURE,
+			"Error with rte_rawdev_start()\n");
+	}
+}
+
+static void
+assign_rawdevs(void)
+{
+	uint16_t nb_rawdev = 0, rdev_id = 0;
+	uint32_t i, j;
+
+	for (i = 0; i < cfg.nb_ports; i++) {
+		for (j = 0; j < cfg.ports[i].nb_queues; j++) {
+			struct rte_rawdev_info rdev_info = { 0 };
+
+			do {
+				if (rdev_id == rte_rawdev_count())
+					goto end;
+				rte_rawdev_info_get(rdev_id++, &rdev_info);
+			} while (strcmp(rdev_info.driver_name,
+				IOAT_PMD_RAWDEV_NAME_STR) != 0);
+
+			cfg.ports[i].ioat_ids[j] = rdev_id - 1;
+			configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
+			++nb_rawdev;
+		}
+	}
+end:
+	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
+		rte_exit(EXIT_FAILURE,
+			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
+			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
+	RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
+}
+
 static void
 assign_rings(void)
 {
@@ -585,18 +716,29 @@  main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE,
 			"There should be at least one slave lcore.\n");
 
-	assign_rings();
+	if (copy_mode == COPY_MODE_IOAT_NUM)
+		assign_rawdevs();
+	else /* copy_mode == COPY_MODE_SW_NUM */
+		assign_rings();
 
 	start_forwarding_cores();
 
 	/* force_quit is true when we get here */
 	rte_eal_mp_wait_lcore();
 
+	uint32_t j;
 	for (i = 0; i < cfg.nb_ports; i++) {
 		printf("Closing port %d\n", cfg.ports[i].rxtx_port);
 		rte_eth_dev_stop(cfg.ports[i].rxtx_port);
 		rte_eth_dev_close(cfg.ports[i].rxtx_port);
-		rte_ring_free(cfg.ports[i].rx_to_tx_ring);
+		if (copy_mode == COPY_MODE_IOAT_NUM) {
+			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
+				printf("Stopping rawdev %d\n",
+					cfg.ports[i].ioat_ids[j]);
+				rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
+			}
+		} else /* copy_mode == COPY_MODE_SW_NUM */
+			rte_ring_free(cfg.ports[i].rx_to_tx_ring);
 	}
 
 	printf("Bye...\n");