From patchwork Wed Aug 9 01:33:03 2023
X-Patchwork-Submitter: Wenjing Qiao
X-Patchwork-Id: 130003
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Wenjing Qiao
To: jingjing.wu@intel.com, beilei.xing@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, wenjing.qiao@intel.com,
 Simei Su, Pavan Kumar Linga
Subject: [PATCH 09/14] common/idpf/base: use local pointer before updating 'CQ out'
Date: Wed, 9 Aug 2023 01:33:03 +0000
Message-Id: <20230809013308.1449103-10-wenjing.qiao@intel.com>
In-Reply-To: <20230809013308.1449103-1-wenjing.qiao@intel.com>
References: <20230809013308.1449103-1-wenjing.qiao@intel.com>

From: Simei Su

Instead of updating the 'cq_out' double pointer directly, use a local
pointer and assign it to 'cq_out' only when the function returns success.
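
For illustration only (not part of this patch), a minimal sketch of the same
"work on a local pointer, publish on success" pattern; 'struct ctx' and
'ctx_create' are made-up names, not idpf code:

	#include <errno.h>
	#include <stdlib.h>

	struct ctx {
		int id;
	};

	/* The caller's *out is touched only on the success path. */
	static int ctx_create(int id, struct ctx **out)
	{
		struct ctx *c = calloc(1, sizeof(*c));	/* build into a local */

		if (!c)
			return -ENOMEM;

		c->id = id;
		if (id < 0) {			/* illustrative failure path */
			free(c);		/* clean up the local only */
			return -EINVAL;
		}

		*out = c;			/* publish to the caller last */
		return 0;
	}

This keeps the caller's pointer (here '*out') untouched on every error path,
so callers never see a pointer to freed memory.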
Signed-off-by: Pavan Kumar Linga
Signed-off-by: Simei Su
---
 drivers/common/idpf/base/idpf_controlq.c | 43 +++++++++++++-----------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index 6815153e1d..b84a1ea046 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -137,6 +137,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 		  struct idpf_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq_out)
 {
+	struct idpf_ctlq_info *cq;
 	bool is_rxq = false;
 	int status = 0;
 
@@ -145,26 +146,26 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
 		return -EINVAL;
 
-	*cq_out = (struct idpf_ctlq_info *)
-		  idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
-	if (!(*cq_out))
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+	if (!cq)
 		return -ENOMEM;
 
-	(*cq_out)->cq_type = qinfo->type;
-	(*cq_out)->q_id = qinfo->id;
-	(*cq_out)->buf_size = qinfo->buf_size;
-	(*cq_out)->ring_size = qinfo->len;
+	(cq)->cq_type = qinfo->type;
+	(cq)->q_id = qinfo->id;
+	(cq)->buf_size = qinfo->buf_size;
+	(cq)->ring_size = qinfo->len;
 
-	(*cq_out)->next_to_use = 0;
-	(*cq_out)->next_to_clean = 0;
-	(*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+	(cq)->next_to_use = 0;
+	(cq)->next_to_clean = 0;
+	(cq)->next_to_post = cq->ring_size - 1;
 
 	switch (qinfo->type) {
 	case IDPF_CTLQ_TYPE_MAILBOX_RX:
 		is_rxq = true;
 		/* fallthrough */
 	case IDPF_CTLQ_TYPE_MAILBOX_TX:
-		status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
+		status = idpf_ctlq_alloc_ring_res(hw, cq);
 		break;
 	default:
 		status = -EINVAL;
@@ -175,33 +176,35 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 		goto init_free_q;
 
 	if (is_rxq) {
-		idpf_ctlq_init_rxq_bufs(*cq_out);
+		idpf_ctlq_init_rxq_bufs(cq);
 	} else {
 		/* Allocate the array of msg pointers for TX queues */
-		(*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
 			idpf_calloc(hw, qinfo->len,
 				    sizeof(struct idpf_ctlq_msg *));
-		if (!(*cq_out)->bi.tx_msg) {
+		if (!cq->bi.tx_msg) {
 			status = -ENOMEM;
 			goto init_dealloc_q_mem;
 		}
 	}
 
-	idpf_ctlq_setup_regs(*cq_out, qinfo);
+	idpf_ctlq_setup_regs(cq, qinfo);
 
-	idpf_ctlq_init_regs(hw, *cq_out, is_rxq);
+	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	idpf_init_lock(&(*cq_out)->cq_lock);
+	idpf_init_lock(&(cq->cq_lock));
 
-	LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+	*cq_out = cq;
 	return status;
 
 init_dealloc_q_mem:
 	/* free ring buffers and the ring itself */
-	idpf_ctlq_dealloc_ring_res(hw, *cq_out);
+	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 init_free_q:
-	idpf_free(hw, *cq_out);
+	idpf_free(hw, cq);
+	cq = NULL;
 
 	return status;
 }