From patchwork Thu Oct 26 14:15:12 2017
X-Patchwork-Submitter: Radu Nicolau <radu.nicolau@intel.com>
X-Patchwork-Id: 30982
From: Radu Nicolau <radu.nicolau@intel.com>
To: dev@dpdk.org
Cc: david.marchand@6wind.com, declan.doherty@intel.com,
	pablo.de.lara.guarch@intel.com, hemant.agrawal@nxp.com,
	borisp@mellanox.com, aviadye@mellanox.com, thomas@monjalon.net,
	sandeep.malik@nxp.com, jerin.jacob@caviumnetworks.com,
	john.mcnamara@intel.com, konstantin.ananyev@intel.com,
	shahafs@mellanox.com, olivier.matz@6wind.com, akhil.goyal@nxp.com,
	Radu Nicolau <radu.nicolau@intel.com>
Date: Thu, 26 Oct 2017 15:15:12 +0100
Message-Id: <1509027314-18229-2-git-send-email-radu.nicolau@intel.com>
X-Mailer: git-send-email 2.7.5
In-Reply-To: <1509027314-18229-1-git-send-email-radu.nicolau@intel.com>
References: <1509013365-13819-1-git-send-email-radu.nicolau@intel.com>
	<1509027314-18229-1-git-send-email-radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/3] net/ixgbe: fix build issue

Build the inline IPsec support only when CONFIG_RTE_LIBRTE_SECURITY is
enabled, so that the ixgbe PMD still compiles when the security library
is disabled.

Fixes: 9a0752f498d2 ("net/ixgbe: enable inline IPsec")

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/ixgbe/Makefile             |  2 ++
 drivers/net/ixgbe/ixgbe_ethdev.c       |  8 +++++++
 drivers/net/ixgbe/ixgbe_ethdev.h       |  4 ++++
 drivers/net/ixgbe/ixgbe_flow.c         |  6 ++++++
 drivers/net/ixgbe/ixgbe_rxtx.c         | 39 +++++++++++++++++++++++++---------
 drivers/net/ixgbe/ixgbe_rxtx.h         |  8 +++++--
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  6 ++++++
 7 files changed, 61 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index f03c426..7f575f7 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -124,7 +124,9 @@ ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
+endif
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4339347..cc27065 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,7 +61,9 @@
 #include
 #include
 #include
+#ifdef RTE_LIBRTE_SECURITY
 #include
+#endif

 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1168,10 +1170,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}

+#ifdef RTE_LIBRTE_SECURITY
 	/* Initialize security_ctx only for primary process*/
 	eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
 	if (eth_dev->security_ctx == NULL)
 		return -ENOMEM;
+#endif

 	rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1406,7 +1410,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);

+#ifdef RTE_LIBRTE_SECURITY
 	rte_free(eth_dev->security_ctx);
+#endif

 	return 0;
 }
@@ -3707,8 +3713,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

+#ifdef RTE_LIBRTE_SECURITY
 	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
 	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif

 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index f5b52c4..9d336ef 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,7 +38,9 @@
 #include "base/ixgbe_dcb_82599.h"
 #include "base/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
+#ifdef RTE_LIBRTE_SECURITY
 #include "ixgbe_ipsec.h"
+#endif
 #include
 #include
 #include
@@ -487,7 +489,9 @@ struct ixgbe_adapter {
 	struct ixgbe_filter_info filter;
 	struct ixgbe_l2_tn_info l2_tn;
 	struct ixgbe_bw_conf bw_conf;
+#ifdef RTE_LIBRTE_SECURITY
 	struct ixgbe_ipsec ipsec;
+#endif
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
 	struct rte_timecounter systime_tc;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 13c8243..82fc705 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -229,6 +229,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}

+#ifdef RTE_LIBRTE_SECURITY
 	/**
 	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
 	 */
@@ -263,6 +264,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
 					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
 	}
+#endif

 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
@@ -557,9 +559,11 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;

+#ifdef RTE_LIBRTE_SECURITY
 	/* ESP flow not really a flow*/
 	if (filter->proto == IPPROTO_ESP)
 		return 0;
+#endif

 	/* Ixgbe doesn't support tcp flags. */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
@@ -2801,9 +2805,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
 			actions, &ntuple_filter, error);

+#ifdef RTE_LIBRTE_SECURITY
 	/* ESP flow not really a flow*/
 	if (ntuple_filter.proto == IPPROTO_ESP)
 		return flow;
+#endif

 	if (!ret) {
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9f66ebf..37bb57b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -397,7 +397,7 @@ static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
 		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
-		union ixgbe_crypto_tx_desc_md *mdata)
+		__rte_unused uint64_t *mdata)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx = 0;
@@ -481,17 +481,21 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len
 			       << IXGBE_ADVTXD_TUNNEL_LEN;
 	}
+#ifdef RTE_LIBRTE_SECURITY
 	if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+		union ixgbe_crypto_tx_desc_md *md =
+				(union ixgbe_crypto_tx_desc_md *)mdata;
 		seqnum_seed |=
-			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
-		type_tucmd_mlhl |= mdata->enc ?
+			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
+		type_tucmd_mlhl |= md->enc ?
 				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
 				 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
 		type_tucmd_mlhl |=
-			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+			(md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
 		tx_offload_mask.sa_idx |= ~0;
 		tx_offload_mask.sec_pad_len |= ~0;
 	}
+#endif

 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
@@ -670,7 +674,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
 	uint8_t use_ipsec;
+#endif

 	tx_offload.data[0] = 0;
 	tx_offload.data[1] = 0;
@@ -698,7 +704,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
 		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif

 		/* If hardware offload required */
 		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -710,6 +718,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+#ifdef RTE_LIBRTE_SECURITY
 			if (use_ipsec) {
 				union ixgbe_crypto_tx_desc_md *ipsec_mdata =
 					(union ixgbe_crypto_tx_desc_md *)
 						&tx_pkt->udata64;
 				tx_offload.sa_idx = ipsec_mdata->sa_idx;
 				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
 			}
+#endif

 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -877,9 +887,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}

 			ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-				tx_offload,
-				(union ixgbe_crypto_tx_desc_md *)
-						&tx_pkt->udata64);
+				tx_offload, &tx_pkt->udata64);

 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
@@ -897,8 +905,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}

 		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+#ifdef RTE_LIBRTE_SECURITY
 		if (use_ipsec)
 			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
+#endif

 		m_seg = tx_pkt;
 		do {
@@ -1473,11 +1483,13 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
 	}

+#ifdef RTE_LIBRTE_SECURITY
 	if (rx_status & IXGBE_RXD_STAT_SECP) {
 		pkt_flags |= PKT_RX_SEC_OFFLOAD;
 		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
 			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
 	}
+#endif

 	return pkt_flags;
 }
@@ -2397,9 +2409,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
-			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
-			!(dev->data->dev_conf.txmode.offloads
-					& DEV_TX_OFFLOAD_SECURITY)) {
+#ifdef RTE_LIBRTE_SECURITY
+			!(txq->using_ipsec) &&
+#endif
+			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -2569,8 +2582,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIBRTE_SECURITY
 	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
 			DEV_TX_OFFLOAD_SECURITY);
+#endif

 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -4555,8 +4570,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 			struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];

 			rxq->rx_using_sse = rx_using_sse;
+#ifdef RTE_LIBRTE_SECURITY
 			rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
 					DEV_RX_OFFLOAD_SECURITY);
+#endif
 		}
 	}
 }
@@ -5044,6 +5061,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
 		ixgbe_setup_loopback_link_82599(hw);

+#ifdef RTE_LIBRTE_SECURITY
 	if ((dev->data->dev_conf.rxmode.offloads &
 			DEV_RX_OFFLOAD_SECURITY) ||
 		(dev->data->dev_conf.txmode.offloads &
 			DEV_TX_OFFLOAD_SECURITY)) {
 		ret = ixgbe_crypto_enable_ipsec(dev);
 		if (ret != 0) {
 			PMD_DRV_LOG(ERR,
 				    "ixgbe_crypto_enable_ipsec fails with %d.",
 				    ret);
 			return ret;
 		}
 	}
+#endif

 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 4017831..cc7c828 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -140,8 +140,10 @@ struct ixgbe_rx_queue {
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
 	uint8_t rx_using_sse;
 	/**< indicates that vector RX is in use */
+#ifdef RTE_LIBRTE_SECURITY
 	uint8_t using_ipsec;
 	/**< indicates that IPsec RX feature is in use */
+#endif
 #ifdef RTE_IXGBE_INC_VECTOR
 	uint16_t rxrearm_nb;     /**< number of remaining to be re-armed */
 	uint16_t rxrearm_start;  /**< the idx we start the re-arming from */
@@ -185,10 +187,11 @@ union ixgbe_tx_offload {
 		/* fields for TX offloading of tunnels */
 		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
 		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
-
+#ifdef RTE_LIBRTE_SECURITY
 		/* inline ipsec related*/
 		uint64_t sa_idx:8;	/**< TX SA database entry index */
 		uint64_t sec_pad_len:4;	/**< padding length */
+#endif
 	};
 };
@@ -253,9 +256,10 @@ struct ixgbe_tx_queue {
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	const struct ixgbe_txq_ops *ops; /**< txq ops */
 	uint8_t tx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_SECURITY
 	uint8_t using_ipsec;
 	/**< indicates that IPsec TX feature is in use */
-
+#endif
 };

 struct ixgbe_txq_ops {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 6fe0d85..158ccd9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -122,6 +122,7 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
 }

+#ifdef RTE_LIBRTE_SECURITY
 static inline void
 desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
 {
@@ -174,6 +175,7 @@ desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
 	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
 	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
 }
+#endif

 static inline void
 desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
@@ -363,7 +365,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	volatile union ixgbe_adv_rx_desc *rxdp;
 	struct ixgbe_rx_entry *sw_ring;
 	uint16_t nb_pkts_recd;
+#ifdef RTE_LIBRTE_SECURITY
 	uint8_t use_ipsec = rxq->using_ipsec;
+#endif
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
@@ -527,8 +531,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* set ol_flags with vlan packet type */
 		desc_to_olflags_v(descs, mbuf_init, vlan_flags,
 				  &rx_pkts[pos]);
+#ifdef RTE_LIBRTE_SECURITY
 		if (unlikely(use_ipsec))
 			desc_to_olflags_v_ipsec(descs, rx_pkts);
+#endif

 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
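
Every hunk above applies the same conditional-compilation pattern: the
IPsec-only struct fields, locals and call sites are wrapped in
#ifdef RTE_LIBRTE_SECURITY so the PMD still compiles when the security
library is turned off. Below is a minimal, self-contained sketch of that
pattern; example_txq and example_txq_uses_ipsec are illustrative stand-ins
rather than the driver's real definitions, and it assumes (as in the DPDK
build system) that CONFIG_RTE_LIBRTE_SECURITY=y ends up defining the
RTE_LIBRTE_SECURITY macro.

/*
 * Sketch only: stand-in types showing how optional IPsec state can be
 * compiled out together with the code that uses it.
 */
#include <stdint.h>

struct example_txq {
	uint16_t nb_desc;        /* always present */
#ifdef RTE_LIBRTE_SECURITY
	uint8_t using_ipsec;     /* exists only when the security lib is built */
#endif
};

static int
example_txq_uses_ipsec(const struct example_txq *txq)
{
#ifdef RTE_LIBRTE_SECURITY
	return txq->using_ipsec; /* real check when support is built in */
#else
	(void)txq;               /* support compiled out: never in use */
	return 0;
#endif
}

int
main(void)
{
	struct example_txq txq = { .nb_desc = 512 };

	/* Builds and runs the same way with or without -DRTE_LIBRTE_SECURITY. */
	return example_txq_uses_ipsec(&txq);
}

Guarding the queue fields as well as the call sites is what lets the simple
Tx path check in ixgbe_set_tx_function drop the DEV_TX_OFFLOAD_SECURITY test
entirely when the feature is compiled out.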