[v2] net/af_xdp: don't allow umem sharing for xsks with same ctx

Message ID 20201013131008.4070-1-ciara.loftus@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series [v2] net/af_xdp: don't allow umem sharing for xsks with same ctx

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-testing success Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/checkpatch success coding style OK

Commit Message

Ciara Loftus Oct. 13, 2020, 1:10 p.m. UTC
AF_XDP PMDs that wish to share a UMEM must each have a unique context
(ctx), i.e. netdev,qid tuple. For instance, the following will not
work, since both PMDs' contexts are identical.

  --vdev net_af_xdp0,iface=ens786f1,start_queue=0,shared_umem=1
  --vdev net_af_xdp1,iface=ens786f1,start_queue=0,shared_umem=1

Supporting this scenario would require locks, which would impact
the performance of the more typical case: xsks with different
netdev,qid tuples.
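
By contrast, either of the following is permitted, since the two
contexts differ in either the qid or the netdev (these mirror the
examples in the doc update below):

  --vdev net_af_xdp0,iface=ens786f1,start_queue=0,shared_umem=1
  --vdev net_af_xdp1,iface=ens786f1,start_queue=1,shared_umem=1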

Fixes: 74b46340e2d4 ("net/af_xdp: support shared UMEM")
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
v2:
* Add doc update
* Fix commit message style issues
* Update commit message with more information

 doc/guides/nics/af_xdp.rst          | 25 ++++++++++++++++
 drivers/net/af_xdp/rte_eth_af_xdp.c | 44 +++++++++++++++++++++++------
 2 files changed, 60 insertions(+), 9 deletions(-)

Comments

Ferruh Yigit Oct. 13, 2020, 3:57 p.m. UTC | #1
On 10/13/2020 2:10 PM, Ciara Loftus wrote:
> AF_XDP PMDs that wish to share a UMEM must each have a unique
> context (ctx), i.e. netdev,qid tuple. For instance, the following
> will not work, since both PMDs' contexts are identical.
> 
>    --vdev net_af_xdp0,iface=ens786f1,start_queue=0,shared_umem=1
>    --vdev net_af_xdp1,iface=ens786f1,start_queue=0,shared_umem=1
> 
> Supporting this scenario would require locks, which would impact
> the performance of the more typical case: xsks with different
> netdev,qid tuples.
> 
> Fixes: 74b46340e2d4 ("net/af_xdp: support shared UMEM")
>
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>

Applied to dpdk-next-net/main, thanks.

Patch

diff --git a/doc/guides/nics/af_xdp.rst b/doc/guides/nics/af_xdp.rst
index be268fe7ff..052e59a3ae 100644
--- a/doc/guides/nics/af_xdp.rst
+++ b/doc/guides/nics/af_xdp.rst
@@ -82,3 +82,28 @@  Limitations
 
   Note: The AF_XDP PMD will fail to initialise if an MTU which violates the driver's
   conditions as above is set prior to launching the application.
+
+- **Shared UMEM**
+
+  The sharing of UMEM is only supported for AF_XDP sockets with unique contexts.
+  The context refers to the netdev,qid tuple.
+
+  The following combination will fail:
+
+  .. code-block:: console
+
+    --vdev net_af_xdp0,iface=ens786f1,shared_umem=1 \
+    --vdev net_af_xdp1,iface=ens786f1,shared_umem=1 \
+
+  Either of the following, however, is permitted, since the netdev or the qid
+  differs between the two vdevs:
+
+  .. code-block:: console
+
+    --vdev net_af_xdp0,iface=ens786f1,shared_umem=1 \
+    --vdev net_af_xdp1,iface=ens786f1,start_queue=1,shared_umem=1 \
+
+  .. code-block:: console
+
+    --vdev net_af_xdp0,iface=ens786f1,shared_umem=1 \
+    --vdev net_af_xdp1,iface=ens786f2,shared_umem=1 \
\ No newline at end of file
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index eaf2c9c873..9e0e5c254a 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -634,16 +634,35 @@  find_internal_resource(struct pmd_internals *port_int)
 	return list;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+		struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+	bool exists = false;
+
+	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
+		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+					ifname, rxq->xsk_queue_idx);
+		exists = true;
+	}
+
+	return exists;
+}
+
 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline struct xsk_umem_info *
-get_shared_umem(struct pkt_rx_queue *rxq) {
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+			struct xsk_umem_info **umem)
+{
 	struct internal_list *list;
 	struct pmd_internals *internals;
-	int i = 0;
+	int i = 0, ret = 0;
 	struct rte_mempool *mb_pool = rxq->mb_pool;
 
 	if (mb_pool == NULL)
-		return NULL;
+		return ret;
 
 	pthread_mutex_lock(&internal_list_lock);
 
@@ -655,20 +674,25 @@  get_shared_umem(struct pkt_rx_queue *rxq) {
 			if (rxq == list_rxq)
 				continue;
 			if (mb_pool == internals->rx_queues[i].mb_pool) {
+				if (ctx_exists(rxq, ifname, list_rxq,
+						internals->if_name)) {
+					ret = -1;
+					goto out;
+				}
 				if (__atomic_load_n(
 					&internals->rx_queues[i].umem->refcnt,
 							__ATOMIC_ACQUIRE)) {
-					pthread_mutex_unlock(
-							&internal_list_lock);
-					return internals->rx_queues[i].umem;
+					*umem = internals->rx_queues[i].umem;
+					goto out;
 				}
 			}
 		}
 	}
 
+out:
 	pthread_mutex_unlock(&internal_list_lock);
 
-	return NULL;
+	return ret;
 }
 
 static int
@@ -913,7 +937,9 @@  xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 	uint64_t umem_size, align = 0;
 
 	if (internals->shared_umem) {
-		umem = get_shared_umem(rxq);
+		if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
+			return NULL;
+
 		if (umem != NULL &&
 			__atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
 					umem->max_xsks) {
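
For readers skimming the diff, the following is a minimal standalone sketch
of the rule that ctx_exists() enforces. The struct and function names here
are illustrative, not the driver's actual types: two XSKs may share a UMEM
only if their netdev,qid contexts differ.

/*
 * Illustrative sketch of the shared-UMEM uniqueness rule; names are
 * hypothetical, not the af_xdp driver's actual types.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <net/if.h> /* IFNAMSIZ */

struct xsk_ctx {
	char ifname[IFNAMSIZ]; /* netdev */
	int qid;               /* queue id */
};

static bool
may_share_umem(const struct xsk_ctx *a, const struct xsk_ctx *b)
{
	/* Identical netdev,qid tuple means identical ctx: no sharing. */
	return a->qid != b->qid ||
	       strncmp(a->ifname, b->ifname, IFNAMSIZ) != 0;
}

int
main(void)
{
	struct xsk_ctx a = { "ens786f1", 0 };
	struct xsk_ctx b = { "ens786f1", 0 };
	struct xsk_ctx c = { "ens786f1", 1 };

	printf("a,b may share: %d\n", may_share_umem(&a, &b)); /* 0 */
	printf("a,c may share: %d\n", may_share_umem(&a, &c)); /* 1 */
	return 0;
}

With the patch applied, a forbidden combination now fails during queue
setup with the log "ctx %s,%i already exists, cannot share umem" (from
ctx_exists() above), instead of silently taking the unsynchronised
shared-UMEM path.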