From patchwork Wed Jan 8 16:15:59 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 64295
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Date: Wed, 8 Jan 2020 16:15:59 +0000
Message-Id: <1578500161-20156-3-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
References: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH 2/4] net/mlx5: update Tx error handling routine

This is a preparation step: the index of elts to free on completion
is going to be stored in a dedicated free-on-completion queue. This
patch updates the elts freeing routine and the Tx error handling
routine so that both stay in sync with the upcoming new queue.

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
(A simplified sketch of the new completion-loop pattern is appended
after the diff.)

 drivers/net/mlx5/mlx5_rxtx.c | 98 +++++++++++++++++++++++---------------------
 drivers/net/mlx5/mlx5_rxtx.h |  4 +-
 drivers/net/mlx5/mlx5_txq.c  |  2 +-
 3 files changed, 54 insertions(+), 50 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ee6d5fc..b7b40ac 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -654,10 +654,10 @@ enum mlx5_txcmp_code {
  *   Pointer to the error CQE.
  *
  * @return
- *   Negative value if queue recovery failed,
- *   the last Tx buffer element to free otherwise.
+ *   Negative value if queue recovery failed, otherwise
+ *   the error completion entry is handled successfully.
  */
-int
+static int
 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
 			 volatile struct mlx5_err_cqe *err_cqe)
 {
@@ -701,18 +701,14 @@ enum mlx5_txcmp_code {
 		 */
 		txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
 					new_wqe_pi) & wqe_m;
-		if (tx_recover_qp(txq_ctrl) == 0) {
-			txq->cq_ci++;
-			/* Release all the remaining buffers. */
-			return txq->elts_head;
+		if (tx_recover_qp(txq_ctrl)) {
+			/* Recovering failed - retry later on the same WQE. */
+			return -1;
 		}
-		/* Recovering failed - try again later on the same WQE. */
-		return -1;
-	} else {
-		txq->cq_ci++;
+		/* Release all the remaining buffers. */
+		txq_free_elts(txq_ctrl);
 	}
-	/* Do not release buffers. */
-	return txq->elts_tail;
+	return 0;
 }
 
 /**
@@ -2034,8 +2030,6 @@ enum mlx5_txcmp_code {
  *   Pointer to TX queue structure.
  * @param valid CQE pointer
  *   if not NULL update txq->wqe_pi and flush the buffers
- * @param itail
- *   if not negative - flush the buffers till this index.
  * @param olx
  *   Configured Tx offloads mask. It is fully defined at
  *   compile time and may be used for optimization.
@@ -2043,25 +2037,18 @@ static __rte_always_inline void
 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
 		   volatile struct mlx5_cqe *last_cqe,
-		   int itail,
 		   unsigned int olx __rte_unused)
 {
-	uint16_t tail;
-
 	if (likely(last_cqe != NULL)) {
+		uint16_t tail;
+
 		txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
 		tail = ((volatile struct mlx5_wqe_cseg *)
 			(txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc;
-	} else if (itail >= 0) {
-		tail = (uint16_t)itail;
-	} else {
-		return;
-	}
-	rte_compiler_barrier();
-	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
-	if (likely(tail != txq->elts_tail)) {
-		mlx5_tx_free_elts(txq, tail, olx);
-		assert(tail == txq->elts_tail);
+		if (likely(tail != txq->elts_tail)) {
+			mlx5_tx_free_elts(txq, tail, olx);
+			assert(tail == txq->elts_tail);
+		}
 	}
 }
 
 /**
@@ -2085,6 +2072,7 @@ enum mlx5_txcmp_code {
 {
 	unsigned int count = MLX5_TX_COMP_MAX_CQE;
 	volatile struct mlx5_cqe *last_cqe = NULL;
+	uint16_t ci = txq->cq_ci;
 	int ret;
 
 	static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
@@ -2092,8 +2080,8 @@ enum mlx5_txcmp_code {
 	do {
 		volatile struct mlx5_cqe *cqe;
 
-		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
-		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+		cqe = &txq->cqes[ci & txq->cqe_m];
+		ret = check_cqe(cqe, txq->cqe_s, ci);
 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
 				/* No new CQEs in completion queue. */
@@ -2109,31 +2097,49 @@ enum mlx5_txcmp_code {
 			rte_wmb();
 			ret = mlx5_tx_error_cqe_handle
 				(txq, (volatile struct mlx5_err_cqe *)cqe);
+			if (unlikely(ret < 0)) {
+				/*
+				 * Some error occurred on queue error
+				 * handling, we do not advance the index
+				 * here, allowing to retry on next call.
+				 */
+				return;
+			}
 			/*
-			 * Flush buffers, update consuming index
-			 * if recovery succeeded. Otherwise
-			 * just try to recover later.
+			 * We are going to fetch all entries with
+			 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
 			 */
-			last_cqe = NULL;
-			break;
+			++ci;
+			continue;
 		}
 		/* Normal transmit completion. */
-		++txq->cq_ci;
+		++ci;
 		last_cqe = cqe;
 #ifndef NDEBUG
 		if (txq->cq_pi)
 			--txq->cq_pi;
 #endif
-		/*
-		 * We have to restrict the amount of processed CQEs
-		 * in one tx_burst routine call. The CQ may be large
-		 * and many CQEs may be updated by the NIC in one
-		 * transaction. Buffers freeing is time consuming,
-		 * multiple iterations may introduce significant
-		 * latency.
-		 */
-	} while (--count);
-	mlx5_tx_comp_flush(txq, last_cqe, ret, olx);
+		/*
+		 * We have to restrict the amount of processed CQEs
+		 * in one tx_burst routine call. The CQ may be large
+		 * and many CQEs may be updated by the NIC in one
+		 * transaction. Buffers freeing is time consuming,
+		 * multiple iterations may introduce significant
+		 * latency.
+		 */
+		if (--count == 0)
+			break;
+	} while (true);
+	if (likely(ci != txq->cq_ci)) {
+		/*
+		 * Update completion queue consuming index
+		 * and ring doorbell to notify hardware.
+		 */
+		rte_compiler_barrier();
+		txq->cq_ci = ci;
+		*txq->cq_db = rte_cpu_to_be_32(ci);
+		mlx5_tx_comp_flush(txq, last_cqe, olx);
+	}
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index e927343..8a2185a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -440,6 +440,7 @@ struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_txq_verify(struct rte_eth_dev *dev);
 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 
 /* mlx5_rxtx.c */
@@ -451,9 +452,6 @@ struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
 void mlx5_set_ptype_table(void);
 void mlx5_set_cksum_table(void);
 void mlx5_set_swp_types_table(void);
-__rte_noinline int mlx5_tx_error_cqe_handle
-			(struct mlx5_txq_data *restrict txq,
-			 volatile struct mlx5_err_cqe *err_cqe);
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
 __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1c4f7e7..abe0947 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -62,7 +62,7 @@
  * @param txq_ctrl
  *   Pointer to TX queue structure.
  */
-static void
+void
 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
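
A note for readers following the control-flow change in the completion
handler above: the loop now advances only a local copy of the
completion queue consumer index, an error return leaves that index
untouched (so the same CQE is retried on the next call), and the index
commit, the doorbell write and the buffer freeing all happen at most
once per call. The fragment below is a minimal standalone sketch of
that pattern, not mlx5 PMD code; every name in it (tx_queue,
poll_one_cqe, free_elts_up_to, ring_doorbell, COMP_MAX_CQE) is
hypothetical.

#include <stdint.h>
#include <stdio.h>

#define COMP_MAX_CQE 16 /* bound the work done per call */

enum cqe_status { CQE_HW_OWN = -1, CQE_SW_OWN = 0, CQE_ERR = 1 };

struct tx_queue {
	uint16_t cq_ci;     /* committed consumer index */
	uint16_t elts_tail; /* next Tx element to free */
};

/* Stub CQE poll: pretend eight completions are pending. */
static enum cqe_status
poll_one_cqe(struct tx_queue *q, uint16_t ci, uint16_t *wqe_tail)
{
	(void)q;
	if (ci >= 8)
		return CQE_HW_OWN;      /* no new CQEs */
	*wqe_tail = (uint16_t)(ci + 1); /* element index from the CQE */
	return CQE_SW_OWN;
}

static void
free_elts_up_to(struct tx_queue *q, uint16_t tail)
{
	q->elts_tail = tail; /* real code returns mbufs to the pool here */
}

static void
ring_doorbell(struct tx_queue *q, uint16_t ci)
{
	(void)q;
	printf("doorbell: cq_ci=%u\n", (unsigned int)ci);
}

static void
handle_completions(struct tx_queue *q)
{
	unsigned int count = COMP_MAX_CQE;
	uint16_t ci = q->cq_ci; /* local copy, committed only at the end */
	uint16_t tail = q->elts_tail;

	do {
		uint16_t wqe_tail;
		enum cqe_status ret = poll_one_cqe(q, ci, &wqe_tail);

		if (ret == CQE_HW_OWN)
			break;  /* no new CQEs in the completion queue */
		if (ret == CQE_ERR)
			return; /* index not advanced, retried on next call */
		++ci;
		tail = wqe_tail;
		/* Restrict the CQEs processed per call to bound latency. */
	} while (--count);
	if (ci != q->cq_ci) {
		/* Commit the index and notify hardware exactly once. */
		q->cq_ci = ci;
		ring_doorbell(q, ci);
		free_elts_up_to(q, tail);
	}
}

int
main(void)
{
	struct tx_queue q = { .cq_ci = 0, .elts_tail = 0 };

	handle_completions(&q);
	printf("cq_ci=%u elts_tail=%u\n", (unsigned int)q.cq_ci,
	       (unsigned int)q.elts_tail);
	return 0;
}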