From patchwork Sun Jun 4 23:25:05 2023
X-Patchwork-Submitter: Ivan Malov
X-Patchwork-Id: 128044
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ivan Malov <ivan.malov@arknetworks.am>
To: dev@dpdk.org
Cc: Andrew Rybchenko, Ferruh Yigit, Andy Moreton
Subject: [PATCH v3 16/34] net/sfc: switch driver-internal flows to use generic methods
Date: Mon, 5 Jun 2023 03:25:05 +0400
Message-Id: <20230604232523.6746-17-ivan.malov@arknetworks.am>
X-Mailer: git-send-email 2.30.2
In-Reply-To: <20230604232523.6746-1-ivan.malov@arknetworks.am>
References: <20230601195538.8265-1-ivan.malov@arknetworks.am>
 <20230604232523.6746-1-ivan.malov@arknetworks.am>

Doing so helps to consolidate flow operations and ensures that every
FW-allocatable resource can be shared by several flows. That is useful in
light of the upcoming support for embedded conntrack assistance, where
several flows will ideally share everything except their unique 5-tuple
entries in the conntrack table.
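For reference, the same kind of PF-to-PHY forwarding rule can be expressed
purely in generic rte_flow terms. The sketch below is illustrative only: it
uses the public rte_flow_create() entry point and a hypothetical helper name
example_pf_to_phy_rule(), whereas the driver builds the same item/action
arrays internally and submits them via sfc_flow_create_locked().

#include <stdint.h>

#include <rte_flow.h>

static struct rte_flow *
example_pf_to_phy_rule(uint16_t port_id, uint32_t prio)
{
	const struct rte_flow_item_ethdev item_spec = { .port_id = port_id };
	const struct rte_flow_action_ethdev action = { .port_id = port_id };
	const struct rte_flow_attr attr = { .transfer = 1, .priority = prio };

	/* Match traffic entering the embedded switch from the PF ethdev. */
	const struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
			.spec = &item_spec,
			.mask = &rte_flow_item_ethdev_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Deliver it to the entity represented by that ethdev (the PHY side). */
	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
			.conf = &action,
		},
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, items, actions, &error);
}

Swapping the item and action types gives the mirror-image PHY-to-PF rule,
which is exactly how the two switchdev default rules below are created.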
Signed-off-by: Ivan Malov Reviewed-by: Andy Moreton --- drivers/net/sfc/sfc_mae.c | 186 +++++++------------------------ drivers/net/sfc/sfc_mae.h | 51 ++------- drivers/net/sfc/sfc_repr_proxy.c | 38 ++----- drivers/net/sfc/sfc_repr_proxy.h | 2 +- 4 files changed, 61 insertions(+), 216 deletions(-) diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c index f7bf682c11..51b2a22357 100644 --- a/drivers/net/sfc/sfc_mae.c +++ b/drivers/net/sfc/sfc_mae.c @@ -74,137 +74,48 @@ sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry) sfc_mae_counters_fini(®istry->counters); } -static int -sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa, - struct sfc_mae_rule **rule) -{ - struct sfc_mae *mae = &sa->mae; - struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules; - unsigned int entry; - int rc; - - for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) { - if (internal_rules->rules[entry].spec == NULL) - break; - } - - if (entry == SFC_MAE_NB_RULES_MAX) { - rc = ENOSPC; - sfc_err(sa, "failed too many rules (%u rules used)", entry); - goto fail_too_many_rules; - } - - *rule = &internal_rules->rules[entry]; - - return 0; - -fail_too_many_rules: - return rc; -} - -int -sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa, - const efx_mport_sel_t *mport_match, - const efx_mport_sel_t *mport_deliver, - int prio, struct sfc_mae_rule **rulep) +struct rte_flow * +sfc_mae_repr_flow_create(struct sfc_adapter *sa, int prio, uint16_t port_id, + enum rte_flow_action_type dst_type, + enum rte_flow_item_type src_type) { + const struct rte_flow_item_ethdev item_spec = { .port_id = port_id }; + const struct rte_flow_action_ethdev action = { .port_id = port_id }; + const void *item_mask = &rte_flow_item_ethdev_mask; + struct rte_flow_attr attr = { .transfer = 1 }; + const struct rte_flow_action actions[] = { + { .type = dst_type, .conf = &action }, + { .type = RTE_FLOW_ACTION_TYPE_END } + }; + const struct rte_flow_item items[] = { + { .type = src_type, .mask = item_mask, .spec = &item_spec }, + { .type = RTE_FLOW_ITEM_TYPE_END } + }; struct sfc_mae *mae = &sa->mae; - struct sfc_mae_rule *rule; - int rc; - - sfc_log_init(sa, "entry"); + struct rte_flow_error error; if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) { - rc = EINVAL; sfc_err(sa, "failed: invalid priority %d (max %u)", prio, mae->nb_action_rule_prios_max); - goto fail_invalid_prio; + return NULL; } if (prio < 0) prio = mae->nb_action_rule_prios_max - 1; - rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule); - if (rc != 0) - goto fail_find_empty_slot; - - sfc_log_init(sa, "init MAE match spec"); - rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION, - (uint32_t)prio, &rule->spec); - if (rc != 0) { - sfc_err(sa, "failed to init MAE match spec"); - goto fail_match_init; - } - - rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL); - if (rc != 0) { - sfc_err(sa, "failed to get MAE match mport selector"); - goto fail_mport_set; - } - - rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions); - if (rc != 0) { - sfc_err(sa, "failed to init MAE action set"); - goto fail_action_init; - } - - rc = efx_mae_action_set_populate_deliver(rule->actions, - mport_deliver); - if (rc != 0) { - sfc_err(sa, "failed to populate deliver action"); - goto fail_populate_deliver; - } - - rc = efx_mae_action_set_alloc(sa->nic, rule->actions, - &rule->action_set); - if (rc != 0) { - sfc_err(sa, "failed to allocate action set"); - goto fail_action_set_alloc; - } - - rc = 
efx_mae_action_rule_insert(sa->nic, rule->spec, NULL, - &rule->action_set, - &rule->rule_id); - if (rc != 0) { - sfc_err(sa, "failed to insert action rule"); - goto fail_rule_insert; - } - - *rulep = rule; - - sfc_log_init(sa, "done"); - - return 0; - -fail_rule_insert: - efx_mae_action_set_free(sa->nic, &rule->action_set); - -fail_action_set_alloc: -fail_populate_deliver: - efx_mae_action_set_spec_fini(sa->nic, rule->actions); + attr.priority = prio; -fail_action_init: -fail_mport_set: - efx_mae_match_spec_fini(sa->nic, rule->spec); - -fail_match_init: -fail_find_empty_slot: -fail_invalid_prio: - sfc_log_init(sa, "failed: %s", rte_strerror(rc)); - return rc; + return sfc_flow_create_locked(sa, true, &attr, items, actions, &error); } void -sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule) +sfc_mae_repr_flow_destroy(struct sfc_adapter *sa, struct rte_flow *flow) { - if (rule == NULL || rule->spec == NULL) - return; - - efx_mae_action_rule_remove(sa->nic, &rule->rule_id); - efx_mae_action_set_free(sa->nic, &rule->action_set); - efx_mae_action_set_spec_fini(sa->nic, rule->actions); - efx_mae_match_spec_fini(sa->nic, rule->spec); + struct rte_flow_error error; + int rc; - rule->spec = NULL; + rc = sfc_flow_destroy_locked(sa, flow, &error); + if (rc != 0) + sfc_err(sa, "failed to destroy the internal flow"); } int @@ -4311,11 +4222,9 @@ sfc_mae_flow_query(struct rte_eth_dev *dev, int sfc_mae_switchdev_init(struct sfc_adapter *sa) { - const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_adapter_shared *sas = sfc_sa2shared(sa); struct sfc_mae *mae = &sa->mae; - efx_mport_sel_t pf; - efx_mport_sel_t phy; - int rc; + int rc = EINVAL; sfc_log_init(sa, "entry"); @@ -4330,31 +4239,20 @@ sfc_mae_switchdev_init(struct sfc_adapter *sa) goto fail_no_mae; } - rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID, - &pf); - if (rc != 0) { - sfc_err(sa, "failed get PF mport"); - goto fail_pf_get; - } - - rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, &phy); - if (rc != 0) { - sfc_err(sa, "failed get PHY mport"); - goto fail_phy_get; - } - - rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy, - SFC_MAE_RULE_PRIO_LOWEST, - &mae->switchdev_rule_pf_to_ext); - if (rc != 0) { + mae->switchdev_rule_pf_to_ext = sfc_mae_repr_flow_create(sa, + SFC_MAE_RULE_PRIO_LOWEST, sas->port_id, + RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, + RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR); + if (mae->switchdev_rule_pf_to_ext == NULL) { sfc_err(sa, "failed add MAE rule to forward from PF to PHY"); goto fail_pf_add; } - rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf, - SFC_MAE_RULE_PRIO_LOWEST, - &mae->switchdev_rule_ext_to_pf); - if (rc != 0) { + mae->switchdev_rule_ext_to_pf = sfc_mae_repr_flow_create(sa, + SFC_MAE_RULE_PRIO_LOWEST, sas->port_id, + RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, + RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT); + if (mae->switchdev_rule_ext_to_pf == NULL) { sfc_err(sa, "failed add MAE rule to forward from PHY to PF"); goto fail_phy_add; } @@ -4364,11 +4262,9 @@ sfc_mae_switchdev_init(struct sfc_adapter *sa) return 0; fail_phy_add: - sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext); + sfc_mae_repr_flow_destroy(sa, mae->switchdev_rule_pf_to_ext); fail_pf_add: -fail_phy_get: -fail_pf_get: fail_no_mae: sfc_log_init(sa, "failed: %s", rte_strerror(rc)); return rc; @@ -4382,6 +4278,6 @@ sfc_mae_switchdev_fini(struct sfc_adapter *sa) if (!sa->switchdev) return; - sfc_mae_rule_del(sa, mae->switchdev_rule_pf_to_ext); - sfc_mae_rule_del(sa, 
mae->switchdev_rule_ext_to_pf); + sfc_mae_repr_flow_destroy(sa, mae->switchdev_rule_pf_to_ext); + sfc_mae_repr_flow_destroy(sa, mae->switchdev_rule_ext_to_pf); } diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h index 307236ea11..f9434e1ab6 100644 --- a/drivers/net/sfc/sfc_mae.h +++ b/drivers/net/sfc/sfc_mae.h @@ -180,33 +180,6 @@ struct sfc_mae_counter_registry { } polling; }; -/** - * MAE rules used to capture traffic generated by VFs and direct it to - * representors (one for each VF). - */ -#define SFC_MAE_NB_REPR_RULES_MAX (64) - -/** Rules to forward traffic from PHY port to PF and from PF to PHY port */ -#define SFC_MAE_NB_SWITCHDEV_RULES (2) -/** Maximum required internal MAE rules */ -#define SFC_MAE_NB_RULES_MAX (SFC_MAE_NB_SWITCHDEV_RULES + \ - SFC_MAE_NB_REPR_RULES_MAX) - -struct sfc_mae_rule { - efx_mae_match_spec_t *spec; - efx_mae_actions_t *actions; - efx_mae_aset_id_t action_set; - efx_mae_rule_id_t rule_id; -}; - -struct sfc_mae_internal_rules { - /* - * Rules required to sustain switchdev mode or to provide - * port representor functionality. - */ - struct sfc_mae_rule rules[SFC_MAE_NB_RULES_MAX]; -}; - struct sfc_mae { /** Assigned switch domain identifier */ uint16_t switch_domain_id; @@ -234,14 +207,12 @@ struct sfc_mae { bool counter_rxq_running; /** Counter registry */ struct sfc_mae_counter_registry counter_registry; - /** Driver-internal flow rules */ - struct sfc_mae_internal_rules internal_rules; /** * Switchdev default rules. They forward traffic from PHY port * to PF and vice versa. */ - struct sfc_mae_rule *switchdev_rule_pf_to_ext; - struct sfc_mae_rule *switchdev_rule_ext_to_pf; + struct rte_flow *switchdev_rule_pf_to_ext; + struct rte_flow *switchdev_rule_ext_to_pf; }; struct sfc_adapter; @@ -396,16 +367,18 @@ sfc_flow_query_cb_t sfc_mae_flow_query; /** * Insert a driver-internal flow rule that matches traffic originating from - * some m-port selector and redirects it to another one - * (eg. PF --> PHY, PHY --> PF). + * a source port (REPRESENTED_PORT or PORT_REPRESENTOR) and directs it to + * its destination counterpart (PORT_REPRESENTOR or REPRESENTED_PORT). * - * If requested priority is negative, use the lowest priority. + * If the prio argument is negative, the lowest level will be picked. 
*/ -int sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa, - const efx_mport_sel_t *mport_match, - const efx_mport_sel_t *mport_deliver, - int prio, struct sfc_mae_rule **rulep); -void sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule); +struct rte_flow *sfc_mae_repr_flow_create(struct sfc_adapter *sa, + int prio, uint16_t port_id, + enum rte_flow_action_type dst_type, + enum rte_flow_item_type src_type); + +void sfc_mae_repr_flow_destroy(struct sfc_adapter *sa, struct rte_flow *flow); + int sfc_mae_switchdev_init(struct sfc_adapter *sa); void sfc_mae_switchdev_fini(struct sfc_adapter *sa); diff --git a/drivers/net/sfc/sfc_repr_proxy.c b/drivers/net/sfc/sfc_repr_proxy.c index 74c3494c35..ff13795c97 100644 --- a/drivers/net/sfc/sfc_repr_proxy.c +++ b/drivers/net/sfc/sfc_repr_proxy.c @@ -681,47 +681,25 @@ static int sfc_repr_proxy_mae_rule_insert(struct sfc_adapter *sa, struct sfc_repr_proxy_port *port) { - struct sfc_repr_proxy *rp = &sa->repr_proxy; - efx_mport_sel_t mport_alias_selector; - efx_mport_sel_t mport_vf_selector; - struct sfc_mae_rule *mae_rule; - int rc; + int rc = EINVAL; sfc_log_init(sa, "entry"); - rc = efx_mae_mport_by_id(&port->egress_mport, - &mport_vf_selector); - if (rc != 0) { - sfc_err(sa, "failed to get VF mport for repr %u", - port->repr_id); - goto fail_get_vf; - } - - rc = efx_mae_mport_by_id(&rp->mport_alias, &mport_alias_selector); - if (rc != 0) { - sfc_err(sa, "failed to get mport selector for repr %u", - port->repr_id); - goto fail_get_alias; - } - - rc = sfc_mae_rule_add_mport_match_deliver(sa, &mport_vf_selector, - &mport_alias_selector, -1, - &mae_rule); - if (rc != 0) { + port->mae_rule = sfc_mae_repr_flow_create(sa, + SFC_MAE_RULE_PRIO_LOWEST, port->rte_port_id, + RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, + RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT); + if (port->mae_rule == NULL) { sfc_err(sa, "failed to insert MAE rule for repr %u", port->repr_id); goto fail_rule_add; } - port->mae_rule = mae_rule; - sfc_log_init(sa, "done"); return 0; fail_rule_add: -fail_get_alias: -fail_get_vf: sfc_log_init(sa, "failed: %s", rte_strerror(rc)); return rc; } @@ -730,9 +708,7 @@ static void sfc_repr_proxy_mae_rule_remove(struct sfc_adapter *sa, struct sfc_repr_proxy_port *port) { - struct sfc_mae_rule *mae_rule = port->mae_rule; - - sfc_mae_rule_del(sa, mae_rule); + sfc_mae_repr_flow_destroy(sa, port->mae_rule); } static int diff --git a/drivers/net/sfc/sfc_repr_proxy.h b/drivers/net/sfc/sfc_repr_proxy.h index 260e2cab30..0a4dedc3e1 100644 --- a/drivers/net/sfc/sfc_repr_proxy.h +++ b/drivers/net/sfc/sfc_repr_proxy.h @@ -67,7 +67,7 @@ struct sfc_repr_proxy_port { uint32_t remote_vnic_mcdi_client_handle; struct sfc_repr_proxy_rxq rxq[SFC_REPR_RXQ_MAX]; struct sfc_repr_proxy_txq txq[SFC_REPR_TXQ_MAX]; - struct sfc_mae_rule *mae_rule; + struct rte_flow *mae_rule; bool enabled; bool started; };
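A brief usage note on the new helpers declared in sfc_mae.h: a negative
priority requests the lowest-priority level, and teardown goes through
sfc_mae_repr_flow_destroy(), which forwards to the generic destroy path and
logs on failure. The sketch below is hypothetical (example_repr_rule() is
not part of the patch) and assumes the driver-internal headers are included.

/* Hypothetical caller of the new driver-internal helpers. */
static int
example_repr_rule(struct sfc_adapter *sa, uint16_t repr_port_id,
		  struct rte_flow **flowp)
{
	/*
	 * Match traffic coming from the port represented by the given ethdev
	 * and deliver it to the corresponding port representor, i.e. the kind
	 * of rule the representor proxy installs. Priority -1 selects the
	 * lowest-priority level.
	 */
	*flowp = sfc_mae_repr_flow_create(sa, -1, repr_port_id,
					  RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
					  RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT);
	if (*flowp == NULL)
		return EINVAL;

	return 0;
}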