From patchwork Thu Dec 9 17:19:47 2021
X-Patchwork-Submitter: "Loftus, Ciara"
X-Patchwork-Id: 105051
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ciara Loftus
To: dev@dpdk.org
Cc: Ciara Loftus, stable@dpdk.org
Subject: [PATCH] net/af_xdp: fix build with -Wunused-function
Date: Thu, 9 Dec 2021 17:19:47 +0000
Message-Id: <20211209171947.25340-1-ciara.loftus@intel.com>

The get_shared_umem function is only called when the kernel flag
XDP_UMEM_UNALIGNED_CHUNK_FLAG is defined. Move the function
implementation and its associated helper so that they are only
compiled when that flag is set.
Fixes: 74b46340e2d4 ("net/af_xdp: support shared UMEM")
Cc: stable@dpdk.org

Signed-off-by: Ciara Loftus
Acked-by: Ferruh Yigit
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 121 ++++++++++++++--------------
 1 file changed, 60 insertions(+), 61 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 96c2c9d939..b3ed704b36 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -697,67 +697,6 @@ find_internal_resource(struct pmd_internals *port_int)
 	return list;
 }
 
-/* Check if the netdev,qid context already exists */
-static inline bool
-ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
-		struct pkt_rx_queue *list_rxq, const char *list_ifname)
-{
-	bool exists = false;
-
-	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
-			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
-		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
-					ifname, rxq->xsk_queue_idx);
-		exists = true;
-	}
-
-	return exists;
-}
-
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
-static inline int
-get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
-			struct xsk_umem_info **umem)
-{
-	struct internal_list *list;
-	struct pmd_internals *internals;
-	int i = 0, ret = 0;
-	struct rte_mempool *mb_pool = rxq->mb_pool;
-
-	if (mb_pool == NULL)
-		return ret;
-
-	pthread_mutex_lock(&internal_list_lock);
-
-	TAILQ_FOREACH(list, &internal_list, next) {
-		internals = list->eth_dev->data->dev_private;
-		for (i = 0; i < internals->queue_cnt; i++) {
-			struct pkt_rx_queue *list_rxq =
-						&internals->rx_queues[i];
-			if (rxq == list_rxq)
-				continue;
-			if (mb_pool == internals->rx_queues[i].mb_pool) {
-				if (ctx_exists(rxq, ifname, list_rxq,
-						internals->if_name)) {
-					ret = -1;
-					goto out;
-				}
-				if (__atomic_load_n(
-					&internals->rx_queues[i].umem->refcnt,
-					__ATOMIC_ACQUIRE)) {
-					*umem = internals->rx_queues[i].umem;
-					goto out;
-				}
-			}
-		}
-	}
-
-out:
-	pthread_mutex_unlock(&internal_list_lock);
-
-	return ret;
-}
-
 static int
 eth_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1013,6 +952,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 	return aligned_addr;
 }
 
+/* Check if the netdev,qid context already exists */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+		struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+	bool exists = false;
+
+	if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+			!strncmp(ifname, list_ifname, IFNAMSIZ)) {
+		AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+					ifname, rxq->xsk_queue_idx);
+		exists = true;
+	}
+
+	return exists;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+			struct xsk_umem_info **umem)
+{
+	struct internal_list *list;
+	struct pmd_internals *internals;
+	int i = 0, ret = 0;
+	struct rte_mempool *mb_pool = rxq->mb_pool;
+
+	if (mb_pool == NULL)
+		return ret;
+
+	pthread_mutex_lock(&internal_list_lock);
+
+	TAILQ_FOREACH(list, &internal_list, next) {
+		internals = list->eth_dev->data->dev_private;
+		for (i = 0; i < internals->queue_cnt; i++) {
+			struct pkt_rx_queue *list_rxq =
+						&internals->rx_queues[i];
+			if (rxq == list_rxq)
+				continue;
+			if (mb_pool == internals->rx_queues[i].mb_pool) {
+				if (ctx_exists(rxq, ifname, list_rxq,
+						internals->if_name)) {
+					ret = -1;
+					goto out;
+				}
+				if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
+					__ATOMIC_ACQUIRE)) {
+					*umem = internals->rx_queues[i].umem;
+					goto out;
+				}
+			}
+		}
+	}
+
+out:
+	pthread_mutex_unlock(&internal_list_lock);
+
+	return ret;
+}
+
 static struct xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 						struct pkt_rx_queue *rxq)
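---
For context, the pattern behind the warning: a static function defined
unconditionally, but whose only call site sits behind
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG), is unused whenever the
kernel headers lack that flag, so -Wunused-function (fatal with
-Werror) breaks the build. Below is a minimal, self-contained sketch of
the guard pattern the patch applies; the helper names are hypothetical,
not taken from the driver, and the flag is assumed to come from
<linux/if_xdp.h> or a -D define:

	/* Sketch only: compiles cleanly with or without the flag, e.g.
	 * gcc -Wall -Werror -DXDP_UMEM_UNALIGNED_CHUNK_FLAG=1 ex.c */
	#include <stdio.h>

	#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	/* The helper lives inside the same guard as its only caller,
	 * so builds without the flag never see an unused static
	 * function. */
	static int
	guarded_helper(void)
	{
		return 1;
	}
	#endif

	int
	main(void)
	{
	#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		printf("flag present: %d\n", guarded_helper());
	#else
		printf("flag absent: helper not compiled\n");
	#endif
		return 0;
	}

Moving the definition under the same conditional as its caller, as the
patch does, is preferable to annotating it __rte_unused: the function
is genuinely dead code on kernels without the flag, so there is no
reason to compile it at all.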