[v3,2/3] net/af_xdp: use correct fill queue addresses

Message ID: 20200210114009.49590-3-ciara.loftus@intel.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: AF_XDP PMD Fixes

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Loftus, Ciara Feb. 10, 2020, 11:40 a.m. UTC
The fill queue addresses should start at the beginning of the mempool
object instead of the beginning of the mbuf. This is because the umem
frame headroom includes the size of the mempool object header (hdrobj).
Starting at this point ensures AF_XDP doesn't write past the available
room in the frame for larger packets whose size is close to that of the
mbuf.

Fixes: d8a210774e1d ("net/af_xdp: support unaligned umem chunks")
Cc: stable@dpdk.org

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
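
For context, a minimal sketch of the assumed umem frame layout and the
corrected address arithmetic. Illustrative only: mock_umem and mock_mempool
are hypothetical stand-ins carrying just the fields the patch uses, not the
real driver types from rte_eth_af_xdp.c.

#include <stdint.h>

/*
 * Assumed layout of one umem frame backed by a mempool object:
 *
 *   frame start
 *   |<-- header_size -->|
 *   [ mp object header  ][ struct rte_mbuf ][ priv ][ headroom ][ data ... ]
 *                        ^ mbuf pointer                          ^ mtod(mbuf)
 *
 * The fill queue address must point at the frame start (the mempool
 * object), not at the mbuf, so the kernel has the whole frame to write
 * into.
 */
struct mock_mempool { uint64_t header_size; };
struct mock_umem    { void *buffer; struct mock_mempool *mb_pool; };

static inline uint64_t
frame_addr(const void *mbuf, const struct mock_umem *umem)
{
	/* offset of the mempool object relative to the umem base, matching
	 * reserve_fill_queue_zc() after this patch */
	return (uint64_t)mbuf - (uint64_t)umem->buffer -
			umem->mb_pool->header_size;
}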
  

Comments

Xiaolong Ye Feb. 13, 2020, 3:09 a.m. UTC | #1
On 02/10, Ciara Loftus wrote:
>The fill queue addresses should start at the beginning of the mempool
>object instead of the beginning of the mbuf.
>
>[snip]

Reviewed-by: Xiaolong Ye <xiaolong.ye@intel.com>
  

Patch

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 8b189119c..1e98cd44f 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -172,7 +172,8 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
 		uint64_t addr;
 
 		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
-		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
+		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
+				umem->mb_pool->header_size;
 		*fq_addr = addr;
 	}
 
@@ -271,8 +272,11 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		addr = xsk_umem__extract_addr(addr);
 
 		bufs[i] = (struct rte_mbuf *)
-				xsk_umem__get_data(umem->buffer, addr);
-		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);
+				xsk_umem__get_data(umem->buffer, addr +
+					umem->mb_pool->header_size);
+		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
+			rte_pktmbuf_priv_size(umem->mb_pool) -
+			umem->mb_pool->header_size;
 
 		rte_pktmbuf_pkt_len(bufs[i]) = len;
 		rte_pktmbuf_data_len(bufs[i]) = len;
@@ -385,7 +389,8 @@ pull_umem_cq(struct xsk_umem_info *umem, int size)
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 		addr = xsk_umem__extract_addr(addr);
 		rte_pktmbuf_free((struct rte_mbuf *)
-					xsk_umem__get_data(umem->buffer, addr));
+					xsk_umem__get_data(umem->buffer,
+					addr + umem->mb_pool->header_size));
 #else
 		rte_ring_enqueue(umem->buf_ring, (void *)addr);
 #endif
@@ -443,9 +448,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			}
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
-			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
-					(uint64_t)mbuf;
+					(uint64_t)mbuf +
+					umem->mb_pool->header_size;
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
 			count++;
@@ -466,9 +473,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
 
-			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
-					(uint64_t)local_mbuf;
+					(uint64_t)local_mbuf +
+					umem->mb_pool->header_size;
 			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
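
To see the TX-side arithmetic end to end, here is a small standalone example.
All numeric values are assumptions for illustration (a 64-byte mempool object
header, a 128-byte struct rte_mbuf, no private area, the default 128-byte
headroom); XSK_UNALIGNED_BUF_OFFSET_SHIFT is 48, as defined in
<linux/if_xdp.h>:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* In unaligned-chunk mode the data offset travels in the upper 16 bits of
 * the descriptor address (see <linux/if_xdp.h>). */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48

int main(void)
{
	uint64_t header_size = 64;  /* mempool object header (assumed) */
	uint64_t mbuf_size = 128;   /* sizeof(struct rte_mbuf) (assumed) */
	uint64_t priv_size = 0;     /* rte_pktmbuf_priv_size() (assumed) */
	uint64_t headroom = 128;    /* RTE_PKTMBUF_HEADROOM (assumed) */

	/* addr: frame start (the mempool object) relative to the umem base,
	 * i.e. (uint64_t)mbuf - (uint64_t)umem->buffer - header_size above */
	uint64_t addr = 0x10000;

	/* offset: packet data relative to the frame start, i.e.
	 * rte_pktmbuf_mtod(mbuf, uint64_t) - (uint64_t)mbuf + header_size */
	uint64_t offset = header_size + mbuf_size + priv_size + headroom;

	uint64_t desc_addr = addr | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	printf("offset=%" PRIu64 " desc->addr=0x%" PRIx64 "\n", offset, desc_addr);

	/* The RX path reverses this: the mbuf sits header_size bytes into the
	 * frame, and data_off = offset - sizeof(struct rte_mbuf) - priv_size -
	 * header_size, which with these numbers recovers the 128-byte
	 * headroom, exactly what af_xdp_rx_zc() now computes. */
	return 0;
}

With these assumed numbers, offset is 320 and the data_off recovered on the
RX side is 128, i.e. the mbuf headroom.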