From patchwork Fri Apr 20 12:23:37 2018
From: Xueming Li
To: Bernard Iremonger, Nelio Laranjeiro, Shahaf Shuler
Cc: Xueming Li, dev@dpdk.org
Date: Fri, 20 Apr 2018 20:23:37 +0800
Message-Id: <20180420122340.113348-9-xuemingl@mellanox.com>
In-Reply-To: <20180420122340.113348-1-xuemingl@mellanox.com>
References: <20180417151436.161374-1-xuemingl@mellanox.com>
Subject: [dpdk-dev] [PATCH v5 08/11] net/mlx5: add hardware flow debug dump

Dump Verbs flow details, including flow spec type and size, for
debugging purposes.

Signed-off-by: Xueming Li
---
 drivers/net/mlx5/mlx5_flow.c  | 68 ++++++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxq.c   | 26 ++++++++++++++---
 drivers/net/mlx5/mlx5_utils.h |  6 ++++
 3 files changed, 86 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 174f2ba6e..593c960f8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2080,6 +2080,57 @@ mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
 }
 
 /**
+ * Dump flow hash RX queue detail.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to the rte_flow.
+ * @param hrxq_idx
+ *   Hash RX queue index.
+ */
+static void
+mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       unsigned int hrxq_idx __rte_unused)
+{
+#ifndef NDEBUG
+	uintptr_t spec_ptr;
+	uint16_t j;
+	char buf[256];
+	uint8_t off;
+
+	spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1);
+	for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs;
+	     j++) {
+		struct ibv_flow_spec *spec = (void *)spec_ptr;
+		off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
+			       spec->hdr.size);
+		spec_ptr += spec->hdr.size;
+	}
+	DRV_LOG(DEBUG,
+		"port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p,"
+		" hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d,"
+		" flags:%x, comp_mask:%x specs:%s",
+		dev->data->port_id, (void *)flow, hrxq_idx,
+		(void *)flow->frxq[hrxq_idx].hrxq,
+		(void *)flow->frxq[hrxq_idx].hrxq->qp,
+		(void *)flow->frxq[hrxq_idx].hrxq->ind_table,
+		flow->frxq[hrxq_idx].hash_fields |
+		(flow->tunnel &&
+		 flow->rss_conf.level > 1 ? (uint32_t)IBV_RX_HASH_INNER : 0),
+		flow->rss_conf.queue_num,
+		flow->frxq[hrxq_idx].ibv_attr->num_of_specs,
+		flow->frxq[hrxq_idx].ibv_attr->size,
+		flow->frxq[hrxq_idx].ibv_attr->priority,
+		flow->frxq[hrxq_idx].ibv_attr->type,
+		flow->frxq[hrxq_idx].ibv_attr->flags,
+		flow->frxq[hrxq_idx].ibv_attr->comp_mask,
+		buf);
+#endif
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param dev
@@ -2121,6 +2172,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 		flow->frxq[i].ibv_flow =
 			mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
 					       flow->frxq[i].ibv_attr);
+		mlx5_flow_dump(dev, flow, i);
 		if (!flow->frxq[i].ibv_flow) {
 			rte_flow_error_set(error, ENOMEM,
 					   RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2128,11 +2180,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 			goto error;
 		}
 		++flows_n;
-		DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
-			dev->data->port_id,
-			(void *)flow, i,
-			(void *)flow->frxq[i].hrxq->qp,
-			(void *)flow->frxq[i].ibv_flow);
 	}
 	if (!flows_n) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2676,24 +2723,25 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
 					     flow->rss_conf.level);
 			if (!flow->frxq[i].hrxq) {
 				DRV_LOG(DEBUG,
-					"port %u flow %p cannot be applied",
+					"port %u flow %p cannot create hash"
+					" rxq",
 					dev->data->port_id, (void *)flow);
 				rte_errno = EINVAL;
 				return -rte_errno;
 			}
 flow_create:
+			mlx5_flow_dump(dev, flow, i);
 			flow->frxq[i].ibv_flow =
 				mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
 						       flow->frxq[i].ibv_attr);
 			if (!flow->frxq[i].ibv_flow) {
 				DRV_LOG(DEBUG,
-					"port %u flow %p cannot be applied",
-					dev->data->port_id, (void *)flow);
+					"port %u flow %p type %u cannot be"
+					" applied",
+					dev->data->port_id, (void *)flow, i);
 				rte_errno = EINVAL;
 				return -rte_errno;
 			}
-			DRV_LOG(DEBUG, "port %u flow %p applied",
-				dev->data->port_id, (void *)flow);
 		}
 		mlx5_flow_create_update_rxqs(dev, flow);
 	}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 58403b5b6..2957e7c86 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1259,9 +1259,9 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
 	}
 	rte_atomic32_inc(&ind_tbl->refcnt);
 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-	DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-		dev->data->port_id, (void *)ind_tbl,
-		rte_atomic32_read(&ind_tbl->refcnt));
+	DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+	      dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+	      rte_atomic32_read(&ind_tbl->refcnt));
 	return ind_tbl;
 error:
 	rte_free(ind_tbl);
@@ -1330,9 +1330,12 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 	DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
 		((struct priv *)dev->data->dev_private)->port,
 		(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
-	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
 		claim_zero(mlx5_glue->destroy_rwq_ind_table
 			   (ind_tbl->ind_table));
+		DEBUG("port %u delete indirection table %p: queues: %u",
+		      dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+	}
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1445,6 +1448,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 			 .pd = priv->pd,
 		 },
 		 &qp_init_attr);
+	DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+	      " tunnel:0x%x level:%hhu dv_attr:comp_mask:0x%" PRIx64
+	      " create_flags:0x%x",
+	      dev->data->port_id, (void *)qp, (void *)ind_tbl,
+	      (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+	      hash_fields, tunnel, rss_level,
+	      qp_init_attr.comp_mask, qp_init_attr.create_flags);
 #else
 	qp = mlx5_glue->create_qp_ex
 		(priv->ctx,
@@ -1466,6 +1476,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 			 .rwq_ind_tbl = ind_tbl->ind_table,
 			 .pd = priv->pd,
 		 });
+	DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
+	      " tunnel:0x%x level:%hhu",
+	      dev->data->port_id, (void *)qp, (void *)ind_tbl,
+	      hash_fields, tunnel, rss_level);
 #endif
 	if (!qp) {
 		rte_errno = errno;
@@ -1575,6 +1589,10 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 		(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
+		      " 0x%x, level: %hhu",
+		      dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+		      hrxq->tunnel, hrxq->rss_level);
 		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
 		LIST_REMOVE(hrxq, next);
 		rte_free(hrxq);
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e8f980ff7..886f60e61 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -103,16 +103,22 @@ extern int mlx5_logtype;
 
 /* claim_zero() does not perform any check when debugging is disabled. */
 #ifndef NDEBUG
 
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
 #define claim_zero(...) assert((__VA_ARGS__) == 0)
 #define claim_nonzero(...) assert((__VA_ARGS__) != 0)
 
 #else /* NDEBUG */
 
+#define DEBUG(...) (void)0
 #define claim_zero(...) (__VA_ARGS__)
 #define claim_nonzero(...) (__VA_ARGS__)
 
 #endif /* NDEBUG */
 
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
+
 /* Convenience macros for accessing mbuf fields. */
 #define NEXT(m) ((m)->next)
 #define DATA_LEN(m) ((m)->data_len)
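
Editor's note on the logging pattern this patch introduces: the new DEBUG()
macro expands to a real DRV_LOG() call only in debug builds and to a no-op
when NDEBUG is defined, so the flow dumps cost nothing in release builds.
The standalone sketch below illustrates the same compile-time gating outside
the driver; log_write() is a hypothetical stand-in for DRV_LOG()/rte_log()
and is not part of DPDK.

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical logging backend standing in for DRV_LOG(). */
    static void
    log_write(const char *level, const char *fmt, ...)
    {
    	va_list ap;

    	va_start(ap, fmt);
    	fprintf(stderr, "%s: ", level);
    	vfprintf(stderr, fmt, ap);
    	fputc('\n', stderr);
    	va_end(ap);
    }

    #ifndef NDEBUG
    /* Debug build: DEBUG() expands to a real log call. */
    #define DEBUG(...) log_write("DEBUG", __VA_ARGS__)
    #else
    /* Release build (-DNDEBUG): DEBUG() compiles to nothing and its
     * arguments are never evaluated. */
    #define DEBUG(...) (void)0
    #endif

    /* INFO and friends stay active in all builds, as in the patch. */
    #define INFO(...) log_write("INFO", __VA_ARGS__)

    int
    main(void)
    {
    	DEBUG("port %u new indirection table", 0u); /* elided when NDEBUG */
    	INFO("always printed");
    	return 0;
    }

Because the (void)0 variant never evaluates its arguments, a release build
can flag the parameters of a debug-only helper as unused, which is why the
patch marks the mlx5_flow_dump() parameters __rte_unused and wraps its body
in #ifndef NDEBUG.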