From patchwork Tue Sep 3 02:19:00 2019
X-Patchwork-Submitter: Ajit Khaparde
X-Patchwork-Id: 58438
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ajit Khaparde
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, Venkat Duvvuru, Kalesh Anakkur Purayil
Date: Mon, 2 Sep 2019 19:19:00 -0700
Message-Id: <20190903021901.25895-17-ajit.khaparde@broadcom.com>
X-Mailer: git-send-email 2.20.1 (Apple Git-117)
In-Reply-To: <20190903021901.25895-1-ajit.khaparde@broadcom.com>
References: <20190903021901.25895-1-ajit.khaparde@broadcom.com>
Subject: [dpdk-dev] [PATCH 16/17] net/bnxt: synchronize between flow related functions

From: Venkat Duvvuru

Currently there are four flow-related functions: bnxt_flow_create,
bnxt_flow_destroy, bnxt_flow_validate and bnxt_flow_flush. None of
them is multi-thread safe. This patch fixes that by serializing these
functions with a per-device lock.
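For context, the locking pattern the patch applies can be summarized with the short standalone sketch below. This is an illustration only, not driver code: the struct and function names (demo_dev, demo_flow_create, demo_flow_flush, ...) are hypothetical, whereas the real driver wraps bnxt_flow_create/destroy/validate/flush with the bnxt_acquire_flow_lock()/bnxt_release_flow_lock() macros introduced in the diff and releases the lock on every return path.

/*
 * Minimal sketch of the serialization scheme, with hypothetical names.
 * The actual patch adds a pthread mutex to struct bnxt and takes it in
 * each rte_flow entry point of the bnxt PMD.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_dev {
	pthread_mutex_t flow_lock;	/* serializes all flow operations */
	int nr_flows;			/* shared state the flow ops touch */
};

static int demo_init_locks(struct demo_dev *dev)
{
	/* like bnxt_init_locks(): create the mutex at init time */
	return pthread_mutex_init(&dev->flow_lock, NULL);
}

static void demo_uninit_locks(struct demo_dev *dev)
{
	/* like bnxt_uninit_locks(): destroy the mutex at teardown */
	pthread_mutex_destroy(&dev->flow_lock);
}

/* Every flow entry point takes the lock on entry and drops it on
 * every return path, including early error returns. */
static int demo_flow_create(struct demo_dev *dev, int valid)
{
	pthread_mutex_lock(&dev->flow_lock);
	if (!valid) {
		pthread_mutex_unlock(&dev->flow_lock);	/* error path */
		return -1;
	}
	dev->nr_flows++;				/* critical section */
	pthread_mutex_unlock(&dev->flow_lock);
	return 0;
}

static void demo_flow_flush(struct demo_dev *dev)
{
	pthread_mutex_lock(&dev->flow_lock);
	dev->nr_flows = 0;
	pthread_mutex_unlock(&dev->flow_lock);
}

int main(void)
{
	struct demo_dev dev = { .nr_flows = 0 };

	if (demo_init_locks(&dev))
		return 1;
	demo_flow_create(&dev, 1);
	demo_flow_flush(&dev);
	printf("flows after flush: %d\n", dev.nr_flows);
	demo_uninit_locks(&dev);
	return 0;
}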
Reviewed-by: Kalesh Anakkur Purayil
Signed-off-by: Venkat Duvvuru
Signed-off-by: Ajit Khaparde
---
 drivers/net/bnxt/bnxt.h        |  6 +++++
 drivers/net/bnxt/bnxt_ethdev.c | 23 ++++++++++++++++
 drivers/net/bnxt/bnxt_flow.c   | 49 +++++++++++++++++++++++++++-------
 3 files changed, 69 insertions(+), 9 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 64cc06304..773227048 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -462,6 +462,7 @@ struct bnxt {
 	uint32_t		flow_flags;
 #define BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN	BIT(0)
+	pthread_mutex_t		flow_lock;
 
 	unsigned int		rx_nr_rings;
 	unsigned int		rx_cp_nr_rings;
 	unsigned int		rx_num_qs_per_vnic;
@@ -571,6 +572,11 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp);
 bool is_bnxt_supported(struct rte_eth_dev *dev);
 bool bnxt_stratus_device(struct bnxt *bp);
 extern const struct rte_flow_ops bnxt_flow_ops;
+#define bnxt_acquire_flow_lock(bp) \
+	pthread_mutex_lock(&(bp)->flow_lock)
+
+#define bnxt_release_flow_lock(bp) \
+	pthread_mutex_unlock(&(bp)->flow_lock)
 
 extern int bnxt_logtype_driver;
 #define PMD_DRV_LOG_RAW(level, fmt, args...) \
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 5ef2ee0c4..f5cbc0038 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4297,6 +4297,17 @@ static int bnxt_init_fw(struct bnxt *bp)
 	return 0;
 }
 
+static int
+bnxt_init_locks(struct bnxt *bp)
+{
+	int err;
+
+	err = pthread_mutex_init(&bp->flow_lock, NULL);
+	if (err)
+		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+	return err;
+}
+
 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 {
 	int rc;
@@ -4350,6 +4361,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 	if (rc)
 		return rc;
 
+	rc = bnxt_init_locks(bp);
+	if (rc)
+		return rc;
+
 	return 0;
 }
 
@@ -4430,6 +4445,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	return rc;
 }
 
+static void
+bnxt_uninit_locks(struct bnxt *bp)
+{
+	pthread_mutex_destroy(&bp->flow_lock);
+}
+
 static int
 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 {
@@ -4451,6 +4472,8 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 		bp->recovery_info = NULL;
 	}
 
+	bnxt_uninit_locks(bp);
+
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 8156b5b4b..d640a923a 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1385,13 +1385,17 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 	struct bnxt_filter_info *filter;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
-	if (ret != 0)
+	if (ret != 0) {
+		bnxt_release_flow_lock(bp);
 		return ret;
+	}
 
 	filter = bnxt_get_unused_filter(bp);
 	if (filter == NULL) {
 		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+		bnxt_release_flow_lock(bp);
 		return -ENOMEM;
 	}
 
@@ -1423,6 +1427,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 	/* No need to hold on to this filter if we are just validating flow */
 	filter->fw_l2_filter_id = UINT64_MAX;
 	bnxt_free_filter(bp, filter);
+	bnxt_release_flow_lock(bp);
 
 	return ret;
 }
@@ -1535,6 +1540,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	bnxt_acquire_flow_lock(bp);
 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Not a validate flow.\n");
@@ -1630,6 +1636,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
 		PMD_DRV_LOG(ERR, "Successfully created flow.\n");
 		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+		bnxt_release_flow_lock(bp);
 		return flow;
 	}
 
@@ -1650,6 +1657,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 			   "Failed to create flow.");
 	rte_free(flow);
 	flow = NULL;
+	bnxt_release_flow_lock(bp);
 	return flow;
 }
 
@@ -1700,13 +1708,28 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow_error *error)
 {
 	struct bnxt *bp = dev->data->dev_private;
-	struct bnxt_filter_info *filter = flow->filter;
-	struct bnxt_vnic_info *vnic = flow->vnic;
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info *vnic;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
+	if (!flow) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
+	}
+
+	filter = flow->filter;
+	vnic = flow->vnic;
+
 	if (!filter) {
-		ret = -EINVAL;
-		goto done;
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Invalid flow: failed to destroy flow.");
+		bnxt_release_flow_lock(bp);
+		return -EINVAL;
 	}
 
 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
@@ -1714,10 +1737,12 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 		ret = bnxt_handle_tunnel_redirect_destroy(bp,
 							  filter,
 							  error);
-		if (!ret)
+		if (!ret) {
 			goto done;
-		else
+		} else {
+			bnxt_release_flow_lock(bp);
 			return ret;
+		}
 	}
 
 	ret = bnxt_match_filter(bp, filter);
@@ -1756,6 +1781,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 				  "Failed to destroy flow.");
 	}
 
+	bnxt_release_flow_lock(bp);
 	return ret;
 }
 
@@ -1769,6 +1795,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 	unsigned int i;
 	int ret = 0;
 
+	bnxt_acquire_flow_lock(bp);
 	for (i = 0; i < bp->max_vnics; i++) {
 		vnic = &bp->vnic_info[i];
 		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
@@ -1785,10 +1812,12 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 				bnxt_handle_tunnel_redirect_destroy(bp,
 								    filter,
 								    error);
-				if (!ret)
+				if (!ret) {
 					goto done;
-				else
+				} else {
+					bnxt_release_flow_lock(bp);
 					return ret;
+				}
 			}
 
 			if (filter->filter_type == HWRM_CFA_EM_FILTER)
@@ -1805,6 +1834,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 					RTE_FLOW_ERROR_TYPE_HANDLE,
 					NULL,
 					"Failed to flush flow in HW.");
+				bnxt_release_flow_lock(bp);
 				return -rte_errno;
 			}
 done:
@@ -1833,6 +1863,7 @@ bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 		}
 	}
 
+	bnxt_release_flow_lock(bp);
 	return ret;
 }