[2/2] net/af_xdp: reserve fill queue before socket create
Checks
Commit Message
Some zero-copy AF_XDP drivers, e.g. ice, require that there are addresses
already in the fill queue before the socket is created. Otherwise you may
see log messages such as:
XSK buffer pool does not provide enough addresses to fill 2047 buffers on
Rx ring 0
This commit ensures that the addresses are available before creating the
socket, instead of after.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
Comments
On 2/18/2022 11:20 AM, Ciara Loftus wrote:
> Some zero copy AF_XDP drivers eg. ice require that there are addresses
> already in the fill queue before the socket is created. Otherwise you may
> see log messages such as:
>
> XSK buffer pool does not provide enough addresses to fill 2047 buffers on
> Rx ring 0
>
I confirm the above log is gone with the patch;
for the record, I am seeing the below instead now:
[ +0.346578] ice 0000:86:00.1: Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring 0
[ +0.032472] device enp134s0f1 left promiscuous mode
> This commit ensures that the addresses are available before creating the
> socket, instead of after.
>
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>
@@ -1284,6 +1284,20 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
return -ENOMEM;
txq->umem = rxq->umem;
+#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
+ ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
+ if (ret) {
+ AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+ goto out_umem;
+ }
+#endif
+
+ ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+ goto out_umem;
+ }
+
cfg.rx_size = ring_size;
cfg.tx_size = ring_size;
cfg.libbpf_flags = 0;
@@ -1335,14 +1349,6 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
}
}
-#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
- ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
- if (ret) {
- AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
- goto out_xsk;
- }
-#endif
-
if (rxq->busy_budget) {
ret = configure_preferred_busy_poll(rxq);
if (ret) {
@@ -1351,12 +1357,6 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
}
}
- ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
- if (ret) {
- AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
- goto out_xsk;
- }
-
return 0;
out_xsk: