From patchwork Wed Aug 2 14:10:28 2017
X-Patchwork-Submitter: Nélio Laranjeiro
X-Patchwork-Id: 27378
From: Nelio Laranjeiro
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com
Date: Wed, 2 Aug 2017 16:10:28 +0200
Subject: [dpdk-dev] [PATCH v1 12/21] net/mlx5: remove queue drop support

In preparation for the upcoming major rework of Rx hash queues, in which
this feature will be implemented in a completely different way, drop flow
support is removed for now and will be re-introduced later.

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5.h      |   1 -
 drivers/net/mlx5/mlx5_flow.c | 228 +++----------------------------------
 2 files changed, 15 insertions(+), 214 deletions(-)
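Note for reviewers: after this patch, a rule using RTE_FLOW_ACTION_TYPE_DROP
is rejected by mlx5 at rule validation time with ENOTSUP ("Drop queue not
supported") instead of being offloaded to a drop queue. A minimal sketch of
such a rule follows, for illustration only; the helper name, the port_id
value, and the catch-all Ethernet pattern are assumptions and not part of
this patch (the port_id parameter type also differs across DPDK releases):

	#include <rte_flow.h>

	/* Hypothetical helper: check whether a catch-all drop rule
	 * passes validation on the given port. */
	static int
	check_drop_rule(uint16_t port_id)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_DROP },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error error;

		/* With this patch, mlx5 returns a negative errno
		 * (ENOTSUP) here rather than 0. */
		return rte_flow_validate(port_id, &attr, pattern, actions,
					 &error);
	}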
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 448995e..a0266d4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -143,7 +143,6 @@ struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
 	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
 	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9ed8d05..151854a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -96,7 +96,6 @@ struct rte_flow {
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
-	uint32_t drop:1; /**< Drop queue. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< List of queues. */
 	uint16_t queues_n; /**< Number of queues in the list. */
@@ -274,7 +273,6 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 /* Structure to parse actions. */
 struct mlx5_flow_action {
 	uint32_t queue:1; /**< Target is a receive queue. */
-	uint32_t drop:1; /**< Target is a drop queue. */
 	uint32_t mark:1; /**< Mark is present in the flow. */
 	uint32_t mark_id; /**< Mark identifier. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
@@ -290,14 +288,6 @@ struct mlx5_flow_parse {
 	struct mlx5_flow_action actions; /**< Parsed action result. */
 };
 
-/** Structure for Drop queue. */
-struct rte_flow_drop {
-	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
-	struct ibv_qp *qp; /**< Verbs queue pair. */
-	struct ibv_exp_wq *wq; /**< Verbs work queue. */
-	struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
 	.create = mlx5_flow_create,
@@ -512,7 +502,11 @@ priv_flow_validate(struct priv *priv,
 		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
 			continue;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
-			flow->actions.drop = 1;
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions,
+					   "Drop queue not supported");
+			return -rte_errno;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
 			const struct rte_flow_action_queue *queue =
 				(const struct rte_flow_action_queue *)
@@ -614,11 +608,9 @@ priv_flow_validate(struct priv *priv,
 			goto exit_action_not_supported;
 		}
 	}
-	if (flow->actions.mark && !flow->ibv_attr && !flow->actions.drop)
+	if (flow->actions.mark && !flow->ibv_attr)
 		flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
-	if (!flow->ibv_attr && flow->actions.drop)
-		flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-	if (!flow->actions.queue && !flow->actions.drop) {
+	if (!flow->actions.queue) {
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "no valid action");
 		return -rte_errno;
@@ -1015,62 +1007,6 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
 }
 
 /**
- * Complete flow rule creation with a drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- * @param flow
- *   MLX5 flow attributes (filled by mlx5_flow_validate()).
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- *
- * @return
- *   A flow if the rule could be created.
- */
-static struct rte_flow *
-priv_flow_create_action_queue_drop(struct priv *priv,
-				   struct mlx5_flow_parse *flow,
-				   struct rte_flow_error *error)
-{
-	struct rte_flow *rte_flow;
-	struct ibv_exp_flow_spec_action_drop *drop;
-	unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
-
-	assert(priv->pd);
-	assert(priv->ctx);
-	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
-	if (!rte_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate flow memory");
-		return NULL;
-	}
-	rte_flow->drop = 1;
-	drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
-	*drop = (struct ibv_exp_flow_spec_action_drop){
-			.type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
-			.size = size,
-	};
-	++flow->ibv_attr->num_of_specs;
-	flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-	rte_flow->ibv_attr = flow->ibv_attr;
-	if (!priv->dev->data->dev_started)
-		return rte_flow;
-	rte_flow->qp = priv->flow_drop_queue->qp;
-	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
-						 rte_flow->ibv_attr);
-	if (!rte_flow->ibv_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "flow rule creation failure");
-		goto error;
-	}
-	return rte_flow;
-error:
-	assert(rte_flow);
-	rte_free(rte_flow);
-	return NULL;
-}
-
-/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -1237,15 +1173,11 @@ priv_flow_create(struct priv *priv,
 	flow.hash_fields = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
-	if (flow.actions.mark && !flow.actions.drop) {
+	if (flow.actions.mark) {
 		mlx5_flow_create_flag_mark(&flow, flow.actions.mark_id);
 		flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
 	}
-	if (flow.actions.drop)
-		rte_flow =
-			priv_flow_create_action_queue_drop(priv, &flow, error);
-	else
-		rte_flow = priv_flow_create_action_queue(priv, &flow, error);
+	rte_flow = priv_flow_create_action_queue(priv, &flow, error);
 	if (!rte_flow)
 		goto exit;
 	return rte_flow;
@@ -1297,8 +1229,6 @@ priv_flow_destroy(struct priv *priv,
 	TAILQ_REMOVE(&priv->flows, flow, next);
 	if (flow->ibv_flow)
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-	if (flow->drop)
-		goto free;
 	if (flow->qp)
 		claim_zero(ibv_destroy_qp(flow->qp));
 	if (flow->ind_table)
@@ -1319,8 +1249,6 @@ priv_flow_destroy(struct priv *priv,
 	TAILQ_FOREACH(tmp, &priv->flows, next) {
 		unsigned int j;
 
-		if (tmp->drop)
-			continue;
 		if (!tmp->mark)
 			continue;
 		for (j = 0; (j != tmp->queues_n) && !mark; j++)
@@ -1331,7 +1259,6 @@ priv_flow_destroy(struct priv *priv,
 		}
 		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
 	}
-free:
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1394,122 +1321,6 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 }
 
 /**
- * Create drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   0 on success.
- */
-static int
-priv_flow_create_drop_queue(struct priv *priv)
-{
-	struct rte_flow_drop *fdq = NULL;
-
-	assert(priv->pd);
-	assert(priv->ctx);
-	fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
-	if (!fdq) {
-		WARN("cannot allocate memory for drop queue");
-		goto error;
-	}
-	fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
-			&(struct ibv_exp_cq_init_attr){
-			.comp_mask = 0,
-			});
-	if (!fdq->cq) {
-		WARN("cannot allocate CQ for drop queue");
-		goto error;
-	}
-	fdq->wq = ibv_exp_create_wq(priv->ctx,
-			&(struct ibv_exp_wq_init_attr){
-			.wq_type = IBV_EXP_WQT_RQ,
-			.max_recv_wr = 1,
-			.max_recv_sge = 1,
-			.pd = priv->pd,
-			.cq = fdq->cq,
-			});
-	if (!fdq->wq) {
-		WARN("cannot allocate WQ for drop queue");
-		goto error;
-	}
-	fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
-			&(struct ibv_exp_rwq_ind_table_init_attr){
-			.pd = priv->pd,
-			.log_ind_tbl_size = 0,
-			.ind_tbl = &fdq->wq,
-			.comp_mask = 0,
-			});
-	if (!fdq->ind_table) {
-		WARN("cannot allocate indirection table for drop queue");
-		goto error;
-	}
-	fdq->qp = ibv_exp_create_qp(priv->ctx,
-		&(struct ibv_exp_qp_init_attr){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_EXP_QP_INIT_ATTR_PD |
-				IBV_EXP_QP_INIT_ATTR_PORT |
-				IBV_EXP_QP_INIT_ATTR_RX_HASH,
-			.pd = priv->pd,
-			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
-				.rx_hash_function =
-					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_hash_default_key_len,
-				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = 0,
-				.rwq_ind_tbl = fdq->ind_table,
-				},
-			.port_num = priv->port,
-		});
-	if (!fdq->qp) {
-		WARN("cannot allocate QP for drop queue");
-		goto error;
-	}
-	priv->flow_drop_queue = fdq;
-	return 0;
-error:
-	if (fdq->qp)
-		claim_zero(ibv_destroy_qp(fdq->qp));
-	if (fdq->ind_table)
-		claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
-	if (fdq->wq)
-		claim_zero(ibv_exp_destroy_wq(fdq->wq));
-	if (fdq->cq)
-		claim_zero(ibv_destroy_cq(fdq->cq));
-	if (fdq)
-		rte_free(fdq);
-	priv->flow_drop_queue = NULL;
-	return -1;
-}
-
-/**
- * Delete drop queue.
- *
- * @param priv
- *   Pointer to private structure.
- */
-static void
-priv_flow_delete_drop_queue(struct priv *priv)
-{
-	struct rte_flow_drop *fdq = priv->flow_drop_queue;
-
-	if (!fdq)
-		return;
-	if (fdq->qp)
-		claim_zero(ibv_destroy_qp(fdq->qp));
-	if (fdq->ind_table)
-		claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
-	if (fdq->wq)
-		claim_zero(ibv_exp_destroy_wq(fdq->wq));
-	if (fdq->cq)
-		claim_zero(ibv_destroy_cq(fdq->cq));
-	rte_free(fdq);
-	priv->flow_drop_queue = NULL;
-}
-
-/**
  * Remove all flows.
  *
  * Called by dev_stop() to remove all flows.
@@ -1523,17 +1334,15 @@ priv_flow_stop(struct priv *priv)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+		unsigned int i;
+
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
-		if (flow->mark) {
-			unsigned int n;
-
-			for (n = 0; n < flow->queues_n; ++n)
-				(*priv->rxqs)[flow->queues[n]]->mark = 0;
-		}
+		/* Disable mark on all queues. */
+		for (i = 0; i != priv->rxqs_n; ++i)
+			(*priv->rxqs)[i]->mark = 0;
 		DEBUG("Flow %p removed", (void *)flow);
 	}
-	priv_flow_delete_drop_queue(priv);
 }
 
 /**
@@ -1548,19 +1357,12 @@ priv_flow_stop(struct priv *priv)
 int
 priv_flow_start(struct priv *priv)
 {
-	int ret;
 	struct rte_flow *flow;
 
-	ret = priv_flow_create_drop_queue(priv);
-	if (ret)
-		return -1;
 	TAILQ_FOREACH(flow, &priv->flows, next) {
 		struct ibv_qp *qp;
 
-		if (flow->drop)
-			qp = priv->flow_drop_queue->qp;
-		else
-			qp = flow->qp;
+		qp = flow->qp;
 		flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
 		if (!flow->ibv_flow) {
 			DEBUG("Flow %p cannot be applied", (void *)flow);