From patchwork Wed Sep 8 08:37:57 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiawen Wu X-Patchwork-Id: 98311 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id C5824A0C56; Wed, 8 Sep 2021 10:39:38 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id EE70141195; Wed, 8 Sep 2021 10:37:27 +0200 (CEST) Received: from smtpbguseast2.qq.com (smtpbguseast2.qq.com [54.204.34.130]) by mails.dpdk.org (Postfix) with ESMTP id DBDF641195 for ; Wed, 8 Sep 2021 10:37:25 +0200 (CEST) X-QQ-mid: bizesmtp47t1631090240t0zrze82 Received: from wxdbg.localdomain.com (unknown [183.129.236.74]) by esmtp6.qq.com (ESMTP) with id ; Wed, 08 Sep 2021 16:37:20 +0800 (CST) X-QQ-SSF: 01400000002000E0G000B00A0000000 X-QQ-FEAT: xfGhp5cbJEeWl8e5KN4RzOTx70tMhXFhpyDUCZs/rrzq7kB0oy9QUkzyq/gAA O2gI2u1RH+7jtY9tTWhvq9xfJTNOiDHi9Me86xVCAcRlEz5RjCQlTqWX3DRRcDzFX1UEAwv 68AHglpcwFEhCncsWEZslVbS+l1xo/B/yFyQ1I50on+WoN5tx/ylJqhvuXCDkHpQWC8Ltqa VjwO8jv6fBsISGtMv2FYviIxbLnFXOw/ZDSMYRcIeqxHqbjUu9UrbTCU1wEw4IIWOc36iJJ AgPufqmZhnFcJNje4iDU27PNbwTdC4olQclPJYhycpB/6QpgfQ5p9G9hUasTrIxL1hB+VBQ 8vA1LLkocUgLLQNIdGhh5Whkp+qoEThbYsFy1NP X-QQ-GoodBg: 2 From: Jiawen Wu To: dev@dpdk.org Cc: Jiawen Wu Date: Wed, 8 Sep 2021 16:37:57 +0800 Message-Id: <20210908083758.312055-32-jiawenwu@trustnetic.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20210908083758.312055-1-jiawenwu@trustnetic.com> References: <20210908083758.312055-1-jiawenwu@trustnetic.com> MIME-Version: 1.0 X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:trustnetic.com:qybgforeign:qybgforeign5 X-QQ-Bgrelay: 1 Subject: [dpdk-dev] [PATCH 31/32] net/ngbe: add security offload in Rx and Tx X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list 
List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add security offload in Rx and Tx process. Signed-off-by: Jiawen Wu --- drivers/net/ngbe/ngbe_ipsec.c | 106 ++++++++++++++++++++++++++++++++++ drivers/net/ngbe/ngbe_ipsec.h | 2 + drivers/net/ngbe/ngbe_rxtx.c | 91 ++++++++++++++++++++++++++++- drivers/net/ngbe/ngbe_rxtx.h | 14 ++++- 4 files changed, 210 insertions(+), 3 deletions(-) diff --git a/drivers/net/ngbe/ngbe_ipsec.c b/drivers/net/ngbe/ngbe_ipsec.c index cc79d7d88f..54e05a834f 100644 --- a/drivers/net/ngbe/ngbe_ipsec.c +++ b/drivers/net/ngbe/ngbe_ipsec.c @@ -17,6 +17,55 @@ (a).ipv6[2] == (b).ipv6[2] && \ (a).ipv6[3] == (b).ipv6[3]) +static void +ngbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_ipsec *priv = NGBE_DEV_IPSEC(dev); + int i = 0; + + /* clear Rx IP table*/ + for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) { + uint16_t index = i << 3; + uint32_t reg_val = NGBE_IPSRXIDX_WRITE | + NGBE_IPSRXIDX_TB_IP | index; + wr32(hw, NGBE_IPSRXADDR(0), 0); + wr32(hw, NGBE_IPSRXADDR(1), 0); + wr32(hw, NGBE_IPSRXADDR(2), 0); + wr32(hw, NGBE_IPSRXADDR(3), 0); + wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000); + } + + /* clear Rx SPI and Rx/Tx SA tables*/ + for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) { + uint32_t index = i << 3; + uint32_t reg_val = NGBE_IPSRXIDX_WRITE | + NGBE_IPSRXIDX_TB_SPI | index; + wr32(hw, NGBE_IPSRXSPI, 0); + wr32(hw, NGBE_IPSRXADDRIDX, 0); + wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000); + reg_val = NGBE_IPSRXIDX_WRITE | NGBE_IPSRXIDX_TB_KEY | index; + wr32(hw, NGBE_IPSRXKEY(0), 0); + wr32(hw, NGBE_IPSRXKEY(1), 0); + wr32(hw, NGBE_IPSRXKEY(2), 0); + wr32(hw, NGBE_IPSRXKEY(3), 0); + wr32(hw, NGBE_IPSRXSALT, 0); + wr32(hw, NGBE_IPSRXMODE, 0); + wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000); + reg_val = NGBE_IPSTXIDX_WRITE | index; + 
wr32(hw, NGBE_IPSTXKEY(0), 0); + wr32(hw, NGBE_IPSTXKEY(1), 0); + wr32(hw, NGBE_IPSTXKEY(2), 0); + wr32(hw, NGBE_IPSTXKEY(3), 0); + wr32(hw, NGBE_IPSTXSALT, 0); + wr32w(hw, NGBE_IPSTXIDX, reg_val, NGBE_IPSTXIDX_WRITE, 1000); + } + + memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl)); + memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl)); + memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl)); +} + static int ngbe_crypto_add_sa(struct ngbe_crypto_session *ic_session) { @@ -550,6 +599,63 @@ ngbe_crypto_capabilities_get(void *device __rte_unused) return ngbe_security_capabilities; } +int +ngbe_crypto_enable_ipsec(struct rte_eth_dev *dev) +{ + struct ngbe_hw *hw = ngbe_dev_hw(dev); + uint32_t reg; + uint64_t rx_offloads; + uint64_t tx_offloads; + + rx_offloads = dev->data->dev_conf.rxmode.offloads; + tx_offloads = dev->data->dev_conf.txmode.offloads; + + /* sanity checks */ + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { + PMD_DRV_LOG(ERR, "RSC and IPsec not supported"); + return -1; + } + if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec"); + return -1; + } + + /* Set NGBE_SECTXBUFAF to 0x14 as required in the datasheet */ + wr32(hw, NGBE_SECTXBUFAF, 0x14); + + /* IFG needs to be set to 3 when we are using security. Otherwise a Tx + * hang will occur with heavy traffic. 
+ */ + reg = rd32(hw, NGBE_SECTXIFG); + reg = (reg & ~NGBE_SECTXIFG_MIN_MASK) | NGBE_SECTXIFG_MIN(0x3); + wr32(hw, NGBE_SECTXIFG, reg); + + reg = rd32(hw, NGBE_SECRXCTL); + reg |= NGBE_SECRXCTL_CRCSTRIP; + wr32(hw, NGBE_SECRXCTL, reg); + + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + wr32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA, 0); + reg = rd32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA); + if (reg != 0) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) { + wr32(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_STFWD); + reg = rd32(hw, NGBE_SECTXCTL); + if (reg != NGBE_SECTXCTL_STFWD) { + PMD_DRV_LOG(ERR, "Error enabling Rx Crypto"); + return -1; + } + } + + ngbe_crypto_clear_ipsec_tables(dev); + + return 0; +} + static struct rte_security_ops ngbe_security_ops = { .session_create = ngbe_crypto_create_session, .session_get_size = ngbe_crypto_session_get_size, diff --git a/drivers/net/ngbe/ngbe_ipsec.h b/drivers/net/ngbe/ngbe_ipsec.h index fa5f21027b..13273d91d8 100644 --- a/drivers/net/ngbe/ngbe_ipsec.h +++ b/drivers/net/ngbe/ngbe_ipsec.h @@ -90,4 +90,6 @@ struct ngbe_ipsec { struct ngbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT]; }; +int ngbe_crypto_enable_ipsec(struct rte_eth_dev *dev); + #endif /*NGBE_IPSEC_H_*/ diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c index bee4f04616..04c8ec4e88 100644 --- a/drivers/net/ngbe/ngbe_rxtx.c +++ b/drivers/net/ngbe/ngbe_rxtx.c @@ -33,6 +33,9 @@ static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG | PKT_TX_TUNNEL_MASK | PKT_TX_OUTER_IP_CKSUM | +#ifdef RTE_LIB_SECURITY + PKT_TX_SEC_OFFLOAD | +#endif NGBE_TX_IEEE1588_TMST); #define NGBE_TX_OFFLOAD_NOTSUP_MASK \ @@ -274,7 +277,8 @@ ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, static inline void ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq, volatile struct ngbe_tx_ctx_desc *ctx_txd, - uint64_t ol_flags, union ngbe_tx_offload tx_offload) + uint64_t ol_flags, union 
ngbe_tx_offload tx_offload, + __rte_unused uint64_t *mdata) { union ngbe_tx_offload tx_offload_mask; uint32_t type_tucmd_mlhl; @@ -361,6 +365,19 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq, vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci); } +#ifdef RTE_LIB_SECURITY + if (ol_flags & PKT_TX_SEC_OFFLOAD) { + union ngbe_crypto_tx_desc_md *md = + (union ngbe_crypto_tx_desc_md *)mdata; + tunnel_seed |= NGBE_TXD_IPSEC_SAIDX(md->sa_idx); + type_tucmd_mlhl |= md->enc ? + (NGBE_TXD_IPSEC_ESP | NGBE_TXD_IPSEC_ESPENC) : 0; + type_tucmd_mlhl |= NGBE_TXD_IPSEC_ESPLEN(md->pad_len); + tx_offload_mask.sa_idx |= ~0; + tx_offload_mask.sec_pad_len |= ~0; + } +#endif + txq->ctx_cache[ctx_idx].flags = ol_flags; txq->ctx_cache[ctx_idx].tx_offload.data[0] = tx_offload_mask.data[0] & tx_offload.data[0]; @@ -592,6 +609,9 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint32_t ctx = 0; uint32_t new_ctx; union ngbe_tx_offload tx_offload; +#ifdef RTE_LIB_SECURITY + uint8_t use_ipsec; +#endif tx_offload.data[0] = 0; tx_offload.data[1] = 0; @@ -618,6 +638,9 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * are needed for offload functionality. 
*/ ol_flags = tx_pkt->ol_flags; +#ifdef RTE_LIB_SECURITY + use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD); +#endif /* If hardware offload required */ tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK; @@ -633,6 +656,16 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_offload.outer_l3_len = tx_pkt->outer_l3_len; tx_offload.outer_tun_len = 0; +#ifdef RTE_LIB_SECURITY + if (use_ipsec) { + union ngbe_crypto_tx_desc_md *ipsec_mdata = + (union ngbe_crypto_tx_desc_md *) + rte_security_dynfield(tx_pkt); + tx_offload.sa_idx = ipsec_mdata->sa_idx; + tx_offload.sec_pad_len = ipsec_mdata->pad_len; + } +#endif + /* If new context need be built or reuse the exist ctx*/ ctx = what_ctx_update(txq, tx_ol_req, tx_offload); /* Only allocate context descriptor if required */ @@ -776,7 +809,8 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, - tx_offload); + tx_offload, + rte_security_dynfield(tx_pkt)); txe->last_id = tx_last; tx_id = txe->next_id; @@ -795,6 +829,10 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } olinfo_status |= NGBE_TXD_PAYLEN(pkt_len); +#ifdef RTE_LIB_SECURITY + if (use_ipsec) + olinfo_status |= NGBE_TXD_IPSEC; +#endif m_seg = tx_pkt; do { @@ -978,6 +1016,13 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status) pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD; } +#ifdef RTE_LIB_SECURITY + if (rx_status & NGBE_RXD_STAT_SECP) { + pkt_flags |= PKT_RX_SEC_OFFLOAD; + if (rx_status & NGBE_RXD_ERR_SECERR) + pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + } +#endif return pkt_flags; } @@ -1800,6 +1845,9 @@ ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt) { struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue; if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) return ngbe_tx_done_cleanup_simple(txq, free_cnt); @@ -1885,6 +1933,9 @@ ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue 
*txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ if (txq->offloads == 0 && +#ifdef RTE_LIB_SECURITY + !(txq->using_ipsec) && +#endif txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) { PMD_INIT_LOG(DEBUG, "Using simple tx code path"); dev->tx_pkt_burst = ngbe_xmit_pkts_simple; @@ -1926,6 +1977,10 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) if (hw->is_pf) tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT; +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif return tx_offload_capa; } @@ -2012,6 +2067,10 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; +#ifdef RTE_LIB_SECURITY + txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SECURITY); +#endif txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx)); txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx)); @@ -2220,6 +2279,11 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) offloads |= (DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_VLAN_EXTEND); +#ifdef RTE_LIB_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + return offloads; } @@ -2745,6 +2809,7 @@ ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev) void ngbe_set_rx_function(struct rte_eth_dev *dev) { + uint16_t i; struct ngbe_adapter *adapter = ngbe_dev_adapter(dev); if (dev->data->scattered_rx) { @@ -2788,6 +2853,15 @@ ngbe_set_rx_function(struct rte_eth_dev *dev) dev->rx_pkt_burst = ngbe_recv_pkts; } + +#ifdef RTE_LIB_SECURITY + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct ngbe_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY); + } +#endif } /* @@ -3052,6 +3126,19 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev) if (hw->is_pf && dev->data->dev_conf.lpbk_mode) ngbe_setup_loopback_link(hw); +#ifdef 
RTE_LIB_SECURITY + if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) || + (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) { + ret = ngbe_crypto_enable_ipsec(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, + "ngbe_crypto_enable_ipsec fails with %d.", + ret); + return ret; + } + } +#endif + return 0; } diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h index d63b25c1aa..67c1260f6f 100644 --- a/drivers/net/ngbe/ngbe_rxtx.h +++ b/drivers/net/ngbe/ngbe_rxtx.h @@ -261,7 +261,10 @@ struct ngbe_rx_queue { uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */ - +#ifdef RTE_LIB_SECURITY + uint8_t using_ipsec; + /** indicates that IPsec Rx feature is in use */ +#endif uint16_t rx_free_thresh; /**< max free Rx desc to hold */ uint16_t queue_id; /**< RX queue index */ uint16_t reg_idx; /**< RX queue register index */ @@ -305,6 +308,11 @@ union ngbe_tx_offload { uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */ uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */ +#ifdef RTE_LIB_SECURITY + /* inline ipsec related*/ + uint64_t sa_idx:8; /**< TX SA database entry index */ + uint64_t sec_pad_len:4; /**< padding length */ +#endif }; }; @@ -355,6 +363,10 @@ struct ngbe_tx_queue { uint8_t tx_deferred_start; /**< not in global dev start */ const struct ngbe_txq_ops *ops; /**< txq ops */ +#ifdef RTE_LIB_SECURITY + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ +#endif }; struct ngbe_txq_ops {