From patchwork Wed Oct  2 23:25:57 2019
X-Patchwork-Submitter: Ajit Khaparde
X-Patchwork-Id: 60479
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ajit Khaparde
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, Venkat Duvvuru, Kalesh Anakkur Purayil
Date: Wed, 2 Oct 2019 16:25:57 -0700
Message-Id: <20191002232601.22715-17-ajit.khaparde@broadcom.com>
In-Reply-To: <20191002232601.22715-1-ajit.khaparde@broadcom.com>
References: <20191002232601.22715-1-ajit.khaparde@broadcom.com>
Subject: [dpdk-dev] [PATCH v2 16/20] net/bnxt: synchronize between flow related functions

From: Venkat Duvvuru

Currently, there are four flow-related functions: bnxt_flow_create,
bnxt_flow_destroy, bnxt_flow_validate and bnxt_flow_flush. None of these
functions is multi-thread safe. This patch fixes that by synchronizing
them with a lock.

Reviewed-by: Kalesh Anakkur Purayil
Signed-off-by: Venkat Duvvuru
Signed-off-by: Ajit Khaparde
---
 drivers/net/bnxt/bnxt.h        |  6 +++++
 drivers/net/bnxt/bnxt_ethdev.c | 23 ++++++++++++++++
 drivers/net/bnxt/bnxt_flow.c   | 49 +++++++++++++++++++++++++++-------
 3 files changed, 69 insertions(+), 9 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 8602ab3346..c34582c0ad 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -456,6 +456,7 @@ struct bnxt {
 	uint32_t		flow_flags;
 #define BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN	BIT(0)
+	pthread_mutex_t		flow_lock;
 	unsigned int		rx_nr_rings;
 	unsigned int		rx_cp_nr_rings;
 	unsigned int		rx_num_qs_per_vnic;
@@ -567,6 +568,11 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp);
 bool is_bnxt_supported(struct rte_eth_dev *dev);
 bool bnxt_stratus_device(struct bnxt *bp);
 extern const struct rte_flow_ops bnxt_flow_ops;
+#define bnxt_acquire_flow_lock(bp) \
+	pthread_mutex_lock(&(bp)->flow_lock)
+
+#define bnxt_release_flow_lock(bp) \
+	pthread_mutex_unlock(&(bp)->flow_lock)
 
 extern int bnxt_logtype_driver;
 #define PMD_DRV_LOG_RAW(level, fmt, args...)	\
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 354fa4c630..9f8a63afab 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4476,6 +4476,17 @@ static int bnxt_init_fw(struct bnxt *bp)
 	return 0;
 }
 
+static int
+bnxt_init_locks(struct bnxt *bp)
+{
+	int err;
+
+	err = pthread_mutex_init(&bp->flow_lock, NULL);
+	if (err)
+		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+	return err;
+}
+
 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 {
 	int rc;
@@ -4533,6 +4544,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 	if (rc)
 		return rc;
 
+	rc = bnxt_init_locks(bp);
+	if (rc)
+		return rc;
+
 	return 0;
 }
 
@@ -4613,6 +4628,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	return rc;
 }
 
+static void
+bnxt_uninit_locks(struct bnxt *bp)
+{
+	pthread_mutex_destroy(&bp->flow_lock);
+}
+
 static int
 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 {
@@ -4674,6 +4695,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
 
+	bnxt_uninit_locks(bp);
+
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 938d24be61..58d7cc2261 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1455,13 +1455,17 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 	struct bnxt_filter_info *filter;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
-	if (ret != 0)
+	if (ret != 0) {
+		bnxt_release_flow_lock(bp);
 		return ret;
+	}
 
 	filter = bnxt_get_unused_filter(bp);
 	if (filter == NULL) {
 		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+		bnxt_release_flow_lock(bp);
 		return -ENOMEM;
 	}
 
@@ -1493,6 +1497,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 	/* No need to hold on to this filter if we are just validating flow */
 	filter->fw_l2_filter_id = UINT64_MAX;
 	bnxt_free_filter(bp, filter);
+	bnxt_release_flow_lock(bp);
 
 	return ret;
 }
 
@@ -1623,6 +1628,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	bnxt_acquire_flow_lock(bp);
 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Not a validate flow.\n");
@@ -1724,6 +1730,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 		}
 		PMD_DRV_LOG(ERR, "Successfully created flow.\n");
 		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+		bnxt_release_flow_lock(bp);
 		return flow;
 	}
 	if (!ret) {
@@ -1754,6 +1761,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 			   "Failed to create flow.");
 	rte_free(flow);
 	flow = NULL;
+	bnxt_release_flow_lock(bp);
 	return flow;
 }
 
@@ -1804,13 +1812,28 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow_error *error)
 {
 	struct bnxt *bp = dev->data->dev_private;
-	struct bnxt_filter_info *filter = flow->filter;
-	struct bnxt_vnic_info *vnic = flow->vnic;
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info *vnic;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
+	if (!flow) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
+	}
+
+	filter = flow->filter;
+	vnic = flow->vnic;
+
 	if (!filter) {
-		ret = -EINVAL;
-		goto done;
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
 	}
 
 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
@@ -1818,10 +1841,12 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 		ret =
 		bnxt_handle_tunnel_redirect_destroy(bp,
 						    filter,
 						    error);
-		if (!ret)
+		if (!ret) {
 			goto done;
-		else
+		} else {
+			bnxt_release_flow_lock(bp);
 			return ret;
+		}
 	}
 
 	ret = bnxt_match_filter(bp, filter);
@@ -1859,6 +1884,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 			   "Failed to destroy flow.");
 	}
 
+	bnxt_release_flow_lock(bp);
 	return ret;
 }
 
@@ -1871,6 +1897,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 	unsigned int i;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
 	for (i = 0; i < bp->max_vnics; i++) {
 		vnic = &bp->vnic_info[i];
 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
@@ -1886,10 +1913,12 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 					bnxt_handle_tunnel_redirect_destroy(bp,
 									    filter,
 									    error);
-				if (!ret)
+				if (!ret) {
 					goto done;
-				else
+				} else {
+					bnxt_release_flow_lock(bp);
 					return ret;
+				}
 			}
 
 			if (filter->filter_type == HWRM_CFA_EM_FILTER)
@@ -1906,6 +1935,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					"Failed to flush flow in HW.");
 
+				bnxt_release_flow_lock(bp);
 				return -rte_errno;
 			}
 done:
@@ -1916,6 +1946,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 		}
 	}
 
+	bnxt_release_flow_lock(bp);
 	return ret;
 }
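
For readers unfamiliar with the pattern, the sketch below is a minimal,
self-contained illustration (not part of the patch; the names "dev" and
"flow_create" are hypothetical, only the pthread calls mirror the patch) of
the serialization scheme applied above: the per-device mutex is created
during device init, taken at the start of each flow entry point, released
on every return path, and destroyed at device uninit.

/*
 * Sketch of the per-device flow lock pattern, under the assumptions above.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t flow_lock;	/* serializes all flow operations */
	int nflows;			/* shared state touched by flow ops */
};

static int dev_init(struct dev *d)
{
	d->nflows = 0;
	/* analogous to bnxt_init_locks(): fail device init if this fails */
	return pthread_mutex_init(&d->flow_lock, NULL);
}

static void dev_uninit(struct dev *d)
{
	/* analogous to bnxt_uninit_locks() */
	pthread_mutex_destroy(&d->flow_lock);
}

static int flow_create(struct dev *d, int valid)
{
	pthread_mutex_lock(&d->flow_lock);	/* "acquire_flow_lock" */
	if (!valid) {
		/* release on every error return, not just the success path */
		pthread_mutex_unlock(&d->flow_lock);
		return -1;
	}
	d->nflows++;				/* mutate shared flow state */
	pthread_mutex_unlock(&d->flow_lock);	/* "release_flow_lock" */
	return 0;
}

int main(void)
{
	struct dev d;

	if (dev_init(&d))
		return 1;
	flow_create(&d, 1);	/* succeeds, increments nflows */
	flow_create(&d, 0);	/* fails, but still releases the lock */
	printf("flows: %d\n", d.nflows);
	dev_uninit(&d);
	return 0;
}

Because every early return drops the lock before leaving the function, the
pattern stays deadlock-free even though the driver functions have many exit
points; that is exactly why the patch adds a release call on each error path
rather than a single unlock at the end.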