From patchwork Tue Oct 19 09:23:38 2021
X-Patchwork-Submitter: Radu Nicolau
X-Patchwork-Id: 102153
From: Radu Nicolau
To: Jingjing Wu, Beilei Xing
Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau
Date: Tue, 19 Oct 2021 10:23:38 +0100
Message-Id: <20211019092344.1299368-2-radu.nicolau@intel.com>
In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v10 1/7] common/iavf: add iAVF IPsec inline crypto support

Add support for inline crypto for IPsec.
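For orientation before the diff: the VF carries all inline-IPsec requests to the PF in an inline_ipsec_msg buffer sent with the new VIRTCHNL_OP_INLINE_IPSEC_CRYPTO opcode. The sketch below is illustrative only and not part of the patch; it shows how a VF driver might assemble an INLINE_IPSEC_OP_SA_CREATE request for an inbound ESP SA with a single AES-GCM transform, using the structures from virtchnl_inline_ipsec.h added by this patch. The helper name and parameter choices are assumptions, the u8/u16/u32 typedefs are assumed to come from the iavf osdep headers, and error handling is minimal.

/* Illustrative sketch (not part of the patch): build an
 * INLINE_IPSEC_OP_SA_CREATE request. On the PF side the buffer length
 * is validated by virtchnl_inline_ipsec_val_msg_len(), which for this
 * opcode expects sizeof(struct inline_ipsec_msg) +
 * sizeof(struct virtchnl_ipsec_sa_cfg).
 */
#include <stdlib.h>
#include <string.h>
#include "virtchnl_inline_ipsec.h"

static struct inline_ipsec_msg *
example_build_sa_create(u32 spi, const u8 *key, u16 key_len, u16 *msg_len)
{
	struct inline_ipsec_msg *msg;
	struct virtchnl_ipsec_sa_cfg *sa;

	if (key_len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
		return NULL;

	*msg_len = sizeof(*msg) + sizeof(*sa);
	msg = calloc(1, *msg_len);
	if (msg == NULL)
		return NULL;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0;	/* matched against the PF response */

	sa = msg->ipsec_data.sa_cfg;
	sa->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
	sa->virtchnl_direction = VIRTCHNL_DIR_INGRESS;
	sa->virtchnl_ip_type = VIRTCHNL_IPV4;
	sa->spi = spi;

	/* one AEAD transform: AES-GCM */
	sa->crypto_cfg.items[0].crypto_type = VIRTCHNL_AEAD;
	sa->crypto_cfg.items[0].algo_type = VIRTCHNL_AES_GCM;
	sa->crypto_cfg.items[0].key_len = key_len;
	memcpy(sa->crypto_cfg.items[0].key_data, key, key_len);

	return msg;
}

The VF would then hand this buffer to its admin-queue send path; the PF's reply for this opcode is an inline_ipsec_msg carrying a virtchnl_ipsec_sa_cfg_resp with the new sa_handle, which the VF might match by req_id.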
Signed-off-by: Declan Doherty Signed-off-by: Abhijit Sinha Signed-off-by: Radu Nicolau --- drivers/common/iavf/iavf_type.h | 1 + drivers/common/iavf/virtchnl.h | 17 +- drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++ 3 files changed, 569 insertions(+), 2 deletions(-) create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h index 73dfb47e70..51267ca3b3 100644 --- a/drivers/common/iavf/iavf_type.h +++ b/drivers/common/iavf/iavf_type.h @@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value { IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ IAVF_TX_DESC_DTYPE_CONTEXT = 0x1, IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2, + IAVF_TX_DESC_DTYPE_IPSEC = 0x3, IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8, IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9, IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB, diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h index 067f715945..269578f7c0 100644 --- a/drivers/common/iavf/virtchnl.h +++ b/drivers/common/iavf/virtchnl.h @@ -38,6 +38,8 @@ * value in current and future projects */ +#include "virtchnl_inline_ipsec.h" + /* Error Codes */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, @@ -133,7 +135,8 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_CHANNELS = 31, VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, - /* opcodes 34, 35, 36, and 37 are reserved */ + VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34, + /* opcodes 35 and 36 are reserved */ VIRTCHNL_OP_DCF_CONFIG_BW = 37, VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38, VIRTCHNL_OP_DCF_CMD_DESC = 39, @@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode) return "VIRTCHNL_OP_ADD_CLOUD_FILTER"; case VIRTCHNL_OP_DEL_CLOUD_FILTER: return "VIRTCHNL_OP_DEL_CLOUD_FILTER"; + case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO: + return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO"; case VIRTCHNL_OP_DCF_CMD_DESC: return "VIRTCHNL_OP_DCF_CMD_DESC"; case VIRTCHNL_OP_DCF_CMD_BUFF: @@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6) /* used to negotiate communicating link speeds in Mbps */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) - /* BIT(8) is reserved */ +#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO BIT(8) #define VIRTCHNL_VF_LARGE_NUM_QPAIRS BIT(9) #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) @@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, sizeof(struct virtchnl_queue_vector); } break; + + case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO: + { + struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg; + valid_len = + virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode); + break; + } /* These are always errors coming from the VF. 
*/ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h new file mode 100644 index 0000000000..1e9134501e --- /dev/null +++ b/drivers/common/iavf/virtchnl_inline_ipsec.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2021 Intel Corporation + */ + +#ifndef _VIRTCHNL_INLINE_IPSEC_H_ +#define _VIRTCHNL_INLINE_IPSEC_H_ + +#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3 +#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16 +#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128 +#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2 +#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128 +#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8 +#define VIRTCHNL_IPSEC_SA_DESTROY 0 +#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF +#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF +#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF +#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF + +/* crypto type */ +#define VIRTCHNL_AUTH 1 +#define VIRTCHNL_CIPHER 2 +#define VIRTCHNL_AEAD 3 + +/* caps enabled */ +#define VIRTCHNL_IPSEC_ESN_ENA BIT(0) +#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1) +#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2) +#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3) +#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4) +#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5) +#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6) +#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7) + +/* algorithm type */ +/* Hash Algorithm */ +#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */ +#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */ +#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */ +#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */ +#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */ +#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */ +#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */ +#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */ +#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */ +#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */ +#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */ +#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */ +#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */ +#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */ +#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */ +/* Cipher Algorithm */ +#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */ +#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */ +#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */ +#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */ +/* AEAD Algorithm */ +#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */ +#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */ +#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */ + +/* protocol type */ +#define VIRTCHNL_PROTO_ESP 1 +#define VIRTCHNL_PROTO_AH 2 +#define VIRTCHNL_PROTO_RSVD1 3 + +/* sa mode */ +#define VIRTCHNL_SA_MODE_TRANSPORT 1 +#define VIRTCHNL_SA_MODE_TUNNEL 2 +#define VIRTCHNL_SA_MODE_TRAN_TUN 3 +#define VIRTCHNL_SA_MODE_UNKNOWN 4 + +/* sa direction */ +#define VIRTCHNL_DIR_INGRESS 1 +#define VIRTCHNL_DIR_EGRESS 2 +#define VIRTCHNL_DIR_INGRESS_EGRESS 3 + +/* sa termination */ +#define VIRTCHNL_TERM_SOFTWARE 1 +#define VIRTCHNL_TERM_HARDWARE 2 + +/* sa ip type */ +#define VIRTCHNL_IPV4 1 +#define VIRTCHNL_IPV6 2 + +/* for 
virtchnl_ipsec_resp */ +enum inline_ipsec_resp { + INLINE_IPSEC_SUCCESS = 0, + INLINE_IPSEC_FAIL = -1, + INLINE_IPSEC_ERR_FIFO_FULL = -2, + INLINE_IPSEC_ERR_NOT_READY = -3, + INLINE_IPSEC_ERR_VF_DOWN = -4, + INLINE_IPSEC_ERR_INVALID_PARAMS = -5, + INLINE_IPSEC_ERR_NO_MEM = -6, +}; + +/* Detailed opcodes for DPDK and IPsec use */ +enum inline_ipsec_ops { + INLINE_IPSEC_OP_GET_CAP = 0, + INLINE_IPSEC_OP_GET_STATUS = 1, + INLINE_IPSEC_OP_SA_CREATE = 2, + INLINE_IPSEC_OP_SA_UPDATE = 3, + INLINE_IPSEC_OP_SA_DESTROY = 4, + INLINE_IPSEC_OP_SP_CREATE = 5, + INLINE_IPSEC_OP_SP_DESTROY = 6, + INLINE_IPSEC_OP_SA_READ = 7, + INLINE_IPSEC_OP_EVENT = 8, + INLINE_IPSEC_OP_RESP = 9, +}; + +/* Not all valid, if certain field is invalid, set 1 for all bits */ +struct virtchnl_algo_cap { + u32 algo_type; + + u16 block_size; + + u16 min_key_size; + u16 max_key_size; + u16 inc_key_size; + + u16 min_iv_size; + u16 max_iv_size; + u16 inc_iv_size; + + u16 min_digest_size; + u16 max_digest_size; + u16 inc_digest_size; + + u16 min_aad_size; + u16 max_aad_size; + u16 inc_aad_size; +} __rte_packed; + +/* vf record the capability of crypto from the virtchnl */ +struct virtchnl_sym_crypto_cap { + u8 crypto_type; + u8 algo_cap_num; + struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM]; +} __rte_packed; + +/* VIRTCHNL_OP_GET_IPSEC_CAP + * VF pass virtchnl_ipsec_cap to PF + * and PF return capability of ipsec from virtchnl. + */ +struct virtchnl_ipsec_cap { + /* max number of SA per VF */ + u16 max_sa_num; + + /* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */ + u8 virtchnl_protocol_type; + + /* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */ + u8 virtchnl_sa_mode; + + /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ + u8 virtchnl_direction; + + /* termination mode - value ref VIRTCHNL_TERM_XXX */ + u8 termination_mode; + + /* number of supported crypto capability */ + u8 crypto_cap_num; + + /* descriptor ID */ + u16 desc_id; + + /* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */ + u32 caps_enabled; + + /* crypto capabilities */ + struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM]; +} __rte_packed; + +/* configuration of crypto function */ +struct virtchnl_ipsec_crypto_cfg_item { + u8 crypto_type; + + u32 algo_type; + + /* Length of valid IV data. */ + u16 iv_len; + + /* Length of digest */ + u16 digest_len; + + /* SA salt */ + u32 salt; + + /* The length of the symmetric key */ + u16 key_len; + + /* key data buffer */ + u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN]; +} __rte_packed; + +struct virtchnl_ipsec_sym_crypto_cfg { + struct virtchnl_ipsec_crypto_cfg_item + items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER]; +}; + +/* VIRTCHNL_OP_IPSEC_SA_CREATE + * VF send this SA configuration to PF using virtchnl; + * PF create SA as configuration and PF driver will return + * an unique index (sa_idx) for the created SA. + */ +struct virtchnl_ipsec_sa_cfg { + /* IPsec SA Protocol - AH/ESP */ + u8 virtchnl_protocol_type; + + /* termination mode - value ref VIRTCHNL_TERM_XXX */ + u8 virtchnl_termination; + + /* type of outer IP - IPv4/IPv6 */ + u8 virtchnl_ip_type; + + /* type of esn - !0:enable/0:disable */ + u8 esn_enabled; + + /* udp encap - !0:enable/0:disable */ + u8 udp_encap_enabled; + + /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ + u8 virtchnl_direction; + + /* reserved */ + u8 reserved1; + + /* SA security parameter index */ + u32 spi; + + /* outer src ip address */ + u8 src_addr[16]; + + /* outer dst ip address */ + u8 dst_addr[16]; + + /* SPD reference. 
Used to link an SA with its policy. + * PF drivers may ignore this field. + */ + u16 spd_ref; + + /* high 32 bits of esn */ + u32 esn_hi; + + /* low 32 bits of esn */ + u32 esn_low; + + /* When enabled, sa_index must be valid */ + u8 sa_index_en; + + /* SA index when sa_index_en is true */ + u32 sa_index; + + /* auditing mode - enable/disable */ + u8 audit_en; + + /* lifetime byte limit - enable/disable + * When enabled, byte_limit_hard and byte_limit_soft + * must be valid. + */ + u8 byte_limit_en; + + /* hard byte limit count */ + u64 byte_limit_hard; + + /* soft byte limit count */ + u64 byte_limit_soft; + + /* drop on authentication failure - enable/disable */ + u8 drop_on_auth_fail_en; + + /* anti-reply window check - enable/disable + * When enabled, arw_size must be valid. + */ + u8 arw_check_en; + + /* size of arw window, offset by 1. Setting to 0 + * represents ARW window size of 1. Setting to 127 + * represents ARW window size of 128 + */ + u8 arw_size; + + /* no ip offload mode - enable/disable + * When enabled, ip type and address must not be valid. + */ + u8 no_ip_offload_en; + + /* SA Domain. Used to logical separate an SADB into groups. + * PF drivers supporting a single group ignore this field. + */ + u16 sa_domain; + + /* crypto configuration */ + struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; +} __rte_packed; + +/* VIRTCHNL_OP_IPSEC_SA_UPDATE + * VF send configuration of index of SA to PF + * PF will update SA according to configuration + */ +struct virtchnl_ipsec_sa_update { + u32 sa_index; /* SA to update */ + u32 esn_hi; /* high 32 bits of esn */ + u32 esn_low; /* low 32 bits of esn */ +} __rte_packed; + +/* VIRTCHNL_OP_IPSEC_SA_DESTROY + * VF send configuration of index of SA to PF + * PF will destroy SA according to configuration + * flag bitmap indicate all SA or just selected SA will + * be destroyed + */ +struct virtchnl_ipsec_sa_destroy { + /* All zero bitmap indicates all SA will be destroyed. + * Non-zero bitmap indicates the selected SA in + * array sa_index will be destroyed. + */ + u8 flag; + + /* selected SA index */ + u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM]; +} __rte_packed; + +/* VIRTCHNL_OP_IPSEC_SA_READ + * VF send this SA configuration to PF using virtchnl; + * PF read SA and will return configuration for the created SA. + */ +struct virtchnl_ipsec_sa_read { + /* SA valid - invalid/valid */ + u8 valid; + + /* SA active - inactive/active */ + u8 active; + + /* SA SN rollover - not_rollover/rollover */ + u8 sn_rollover; + + /* IPsec SA Protocol - AH/ESP */ + u8 virtchnl_protocol_type; + + /* termination mode - value ref VIRTCHNL_TERM_XXX */ + u8 virtchnl_termination; + + /* auditing mode - enable/disable */ + u8 audit_en; + + /* lifetime byte limit - enable/disable + * When set to limit, byte_limit_hard and byte_limit_soft + * must be valid. + */ + u8 byte_limit_en; + + /* hard byte limit count */ + u64 byte_limit_hard; + + /* soft byte limit count */ + u64 byte_limit_soft; + + /* drop on authentication failure - enable/disable */ + u8 drop_on_auth_fail_en; + + /* anti-replay window check - enable/disable + * When set to check, arw_size, arw_top, and arw must be valid + */ + u8 arw_check_en; + + /* size of arw window, offset by 1. Setting to 0 + * represents ARW window size of 1. 
Setting to 127 + * represents ARW window size of 128 + */ + u8 arw_size; + + /* reserved */ + u8 reserved1; + + /* top of anti-replay-window */ + u64 arw_top; + + /* anti-replay-window */ + u8 arw[16]; + + /* packets processed */ + u64 packets_processed; + + /* bytes processed */ + u64 bytes_processed; + + /* packets dropped */ + u32 packets_dropped; + + /* authentication failures */ + u32 auth_fails; + + /* ARW check failures */ + u32 arw_fails; + + /* type of esn - enable/disable */ + u8 esn; + + /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ + u8 virtchnl_direction; + + /* SA security parameter index */ + u32 spi; + + /* SA salt */ + u32 salt; + + /* high 32 bits of esn */ + u32 esn_hi; + + /* low 32 bits of esn */ + u32 esn_low; + + /* SA Domain. Used to logical separate an SADB into groups. + * PF drivers supporting a single group ignore this field. + */ + u16 sa_domain; + + /* SPD reference. Used to link an SA with its policy. + * PF drivers may ignore this field. + */ + u16 spd_ref; + + /* crypto configuration. Salt and keys are set to 0 */ + struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; +} __rte_packed; + + +#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 (0) +#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6 (1) + +/* Add allowlist entry in IES */ +struct virtchnl_ipsec_sp_cfg { + u32 spi; + u32 dip[4]; + + /* Drop frame if true or redirect to QAT if false. */ + u8 drop; + + /* Congestion domain. For future use. */ + u8 cgd; + + /* 0 for IPv4 table, 1 for IPv6 table. */ + u8 table_id; + + /* Set TC (congestion domain) if true. For future use. */ + u8 set_tc; +} __rte_packed; + + +/* Delete allowlist entry in IES */ +struct virtchnl_ipsec_sp_destroy { + /* 0 for IPv4 table, 1 for IPv6 table. */ + u8 table_id; + u32 rule_id; +} __rte_packed; + +/* Response from IES to allowlist operations */ +struct virtchnl_ipsec_sp_cfg_resp { + u32 rule_id; +}; + +struct virtchnl_ipsec_sa_cfg_resp { + u32 sa_handle; +}; + +#define INLINE_IPSEC_EVENT_RESET 0x1 +#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2 +#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4 + +struct virtchnl_ipsec_event { + u32 ipsec_event_data; +}; + +#define INLINE_IPSEC_STATUS_AVAILABLE 0x1 +#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2 + +struct virtchnl_ipsec_status { + u32 status; +}; + +struct virtchnl_ipsec_resp { + u32 resp; +}; + +/* Internal message descriptor for VF <-> IPsec communication */ +struct inline_ipsec_msg { + u16 ipsec_opcode; + u16 req_id; + + union { + /* IPsec request */ + struct virtchnl_ipsec_sa_cfg sa_cfg[0]; + struct virtchnl_ipsec_sp_cfg sp_cfg[0]; + struct virtchnl_ipsec_sa_update sa_update[0]; + struct virtchnl_ipsec_sa_destroy sa_destroy[0]; + struct virtchnl_ipsec_sp_destroy sp_destroy[0]; + + /* IPsec response */ + struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0]; + struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0]; + struct virtchnl_ipsec_cap ipsec_cap[0]; + struct virtchnl_ipsec_status ipsec_status[0]; + /* response to del_sa, del_sp, update_sa */ + struct virtchnl_ipsec_resp ipsec_resp[0]; + + /* IPsec event (no req_id is required) */ + struct virtchnl_ipsec_event event[0]; + + /* Reserved */ + struct virtchnl_ipsec_sa_read sa_read[0]; + } ipsec_data; +} __rte_packed; + +static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode) +{ + u16 valid_len = sizeof(struct inline_ipsec_msg); + + switch (opcode) { + case INLINE_IPSEC_OP_GET_CAP: + case INLINE_IPSEC_OP_GET_STATUS: + break; + case INLINE_IPSEC_OP_SA_CREATE: + valid_len += sizeof(struct virtchnl_ipsec_sa_cfg); + break; + case 
INLINE_IPSEC_OP_SP_CREATE: + valid_len += sizeof(struct virtchnl_ipsec_sp_cfg); + break; + case INLINE_IPSEC_OP_SA_UPDATE: + valid_len += sizeof(struct virtchnl_ipsec_sa_update); + break; + case INLINE_IPSEC_OP_SA_DESTROY: + valid_len += sizeof(struct virtchnl_ipsec_sa_destroy); + break; + case INLINE_IPSEC_OP_SP_DESTROY: + valid_len += sizeof(struct virtchnl_ipsec_sp_destroy); + break; + /* Only for msg length calculation of response to VF in case of + * inline ipsec failure. + */ + case INLINE_IPSEC_OP_RESP: + valid_len += sizeof(struct virtchnl_ipsec_resp); + break; + default: + valid_len = 0; + break; + } + + return valid_len; +} + +#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */

From patchwork Tue Oct 19 09:23:39 2021
X-Patchwork-Submitter: Radu Nicolau
X-Patchwork-Id: 102154
From: Radu Nicolau
To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, Radu Nicolau
Date: Tue, 19 Oct 2021 10:23:39 +0100
Message-Id: <20211019092344.1299368-3-radu.nicolau@intel.com>
In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v10 2/7] net/iavf: rework tx path

Rework the TX path and TX descriptor usage in order to allow for better use of offload flags and to facilitate enabling of the inline crypto offload feature.
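At the heart of the rework is simpler per-packet descriptor accounting: a packet needs one data descriptor per mbuf segment, plus one context descriptor when TSO/UDP segmentation or tunnelling offloads are requested. A simplified sketch of the logic that the reworked iavf_xmit_pkts() applies inline (the helper name here is hypothetical):

#include <rte_mbuf.h>

/* Sketch of the per-packet descriptor demand computed by the reworked
 * TX path; mirrors the nb_desc_data/nb_desc_ctx accounting in
 * iavf_xmit_pkts() below.
 */
static inline uint16_t
example_iavf_desc_required(const struct rte_mbuf *mb)
{
	/* one data descriptor per mbuf segment */
	uint16_t nb_desc_data = mb->nb_segs;
	/* plus one context descriptor for TSO/USO or tunnelled packets */
	uint16_t nb_desc_ctx = !!(mb->ol_flags &
		(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));

	return nb_desc_data + nb_desc_ctx;
}

As before, the last descriptor index wraps modulo txq->nb_tx_desc, and the ring is cleaned (or transmission stops) when the required count exceeds txq->nb_free.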
Signed-off-by: Declan Doherty Signed-off-by: Abhijit Sinha Signed-off-by: Radu Nicolau Acked-by: Jingjing Wu --- drivers/net/iavf/iavf_rxtx.c | 538 ++++++++++++++++----------- drivers/net/iavf/iavf_rxtx.h | 117 +++++- drivers/net/iavf/iavf_rxtx_vec_sse.c | 10 +- 3 files changed, 431 insertions(+), 234 deletions(-) diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index 88bbd40c10..11b7fea36f 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp) static inline void iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb, - volatile union iavf_rx_flex_desc *rxdp, - uint8_t rx_flags) + volatile union iavf_rx_flex_desc *rxdp) { - uint16_t vlan_tci = 0; - - if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 && - rte_le_to_cpu_64(rxdp->wb.status_error0) & - (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) - vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1); + if (rte_le_to_cpu_64(rxdp->wb.status_error0) & + (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->vlan_tci = + rte_le_to_cpu_16(rxdp->wb.l2tag1); + } else { + mb->vlan_tci = 0; + } #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC - if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 && - rte_le_to_cpu_16(rxdp->wb.status_error1) & - (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) - vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); -#endif - - if (vlan_tci) { - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - mb->vlan_tci = vlan_tci; + if (rte_le_to_cpu_16(rxdp->wb.status_error1) & + (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { + mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | + PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), + rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); + } else { + mb->vlan_tci_outer = 0; } +#endif } /* Translate the rx descriptor status and error fields to pkt flags */ @@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, rxm->ol_flags = 0; rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags); + iavf_flex_rxd_to_vlan_tci(rxm, &rxd); rxq->rxd_to_pkt_fields(rxq, rxm, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); rxm->ol_flags |= pkt_flags; @@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, first_seg->ol_flags = 0; first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags); + iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); @@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags); + iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]); stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); @@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq) return 0; } -/* Check if the context descriptor is needed for TX 
offloading */ + + +static inline void +iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m) +{ + uint64_t cmd = 0; + + /* TSO enabled */ + if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) + cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT; + + /* Time Sync - Currently not supported */ + + /* Outer L2 TAG 2 Insertion - Currently not supported */ + /* Inner L2 TAG 2 Insertion - Currently not supported */ + + *field |= cmd; +} + +static inline void +iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0, + const struct rte_mbuf *m) +{ + uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE; + uint64_t eip_len = 0; + uint64_t eip_noinc = 0; + /* Default - IP_ID is increment in each segment of LSO */ + + switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 | + PKT_TX_OUTER_IP_CKSUM)) { + case PKT_TX_OUTER_IPV4: + eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD; + eip_len = m->outer_l3_len >> 2; + break; + case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM: + eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD; + eip_len = m->outer_l3_len >> 2; + break; + case PKT_TX_OUTER_IPV6: + eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6; + eip_len = m->outer_l3_len >> 2; + break; + } + + *qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT | + eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT | + eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT; +} + static inline uint16_t -iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag) +iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field, + struct rte_mbuf *m) { - if (flags & PKT_TX_TCP_SEG) - return 1; - if (flags & PKT_TX_VLAN_PKT && - vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) - return 1; - return 0; + uint64_t segmentation_field = 0; + uint64_t total_length = 0; + + total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len); + + if (m->ol_flags & PKT_TX_TUNNEL_MASK) + total_length -= m->outer_l3_len; + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX + if (!m->l4_len || !m->tso_segsz) + PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d", + m->l4_len, m->tso_segsz); + if (m->tso_segsz < 88) + PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d", + m->tso_segsz, 88); +#endif + segmentation_field = + (((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) & + IAVF_TXD_CTX_QW1_TSO_LEN_MASK) | + (((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) & + IAVF_TXD_CTX_QW1_MSS_MASK); + + *field |= segmentation_field; + + return total_length; } + +struct iavf_tx_context_desc_qws { + __le64 qw0; + __le64 qw1; +}; + static inline void -iavf_txd_enable_checksum(uint64_t ol_flags, - uint32_t *td_cmd, - uint32_t *td_offset, - union iavf_tx_offload tx_offload) +iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc, + struct rte_mbuf *m, uint16_t *tlen) { + volatile struct iavf_tx_context_desc_qws *desc_qws = + (volatile struct iavf_tx_context_desc_qws *)desc; + /* fill descriptor type field */ + desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT; + + /* fill command field */ + iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m); + + /* fill segmentation field */ + if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) { + *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1, + m); + } + + /* fill tunnelling field */ + if (m->ol_flags & PKT_TX_TUNNEL_MASK) + iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m); + else + desc_qws->qw0 = 0; + + desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0); + desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1); +} + + +static inline void 
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + struct rte_mbuf *m) +{ + uint64_t command = 0; + uint64_t offset = 0; + uint64_t l2tag1 = 0; + + *qw1 = IAVF_TX_DESC_DTYPE_DATA; + + command = (uint64_t)IAVF_TX_DESC_CMD_ICRC; + + /* Descriptor based VLAN insertion */ + if (m->ol_flags & PKT_TX_VLAN_PKT) { + command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1; + l2tag1 |= m->vlan_tci; + } + /* Set MACLEN */ - *td_offset |= (tx_offload.l2_len >> 1) << - IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; - - /* Enable L3 checksum offloads */ - if (ol_flags & PKT_TX_IP_CKSUM) { - *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM; - *td_offset |= (tx_offload.l3_len >> 2) << - IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (ol_flags & PKT_TX_IPV4) { - *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4; - *td_offset |= (tx_offload.l3_len >> 2) << - IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (ol_flags & PKT_TX_IPV6) { - *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6; - *td_offset |= (tx_offload.l3_len >> 2) << - IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; - } - - if (ol_flags & PKT_TX_TCP_SEG) { - *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (tx_offload.l4_len >> 2) << + offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L3 checksum offloading inner */ + if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) { + command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM; + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (m->ol_flags & PKT_TX_IPV4) { + command |= IAVF_TX_DESC_CMD_IIPT_IPV4; + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (m->ol_flags & PKT_TX_IPV6) { + command |= IAVF_TX_DESC_CMD_IIPT_IPV6; + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } + + if (m->ol_flags & PKT_TX_TCP_SEG) { + command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= (m->l4_len >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; - return; } /* Enable L4 checksum offloads */ - switch (ol_flags & PKT_TX_L4_MASK) { + switch (m->ol_flags & PKT_TX_L4_MASK) { case PKT_TX_TCP_CKSUM: - *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << - IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= (sizeof(struct rte_tcp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: - *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << - IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct rte_sctp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case PKT_TX_UDP_CKSUM: - *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << - IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; - break; - default: + command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct rte_udp_hdr) >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; } + + *qw1 = rte_cpu_to_le_64((((uint64_t)command << + IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | + (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & + IAVF_TXD_DATA_QW1_OFFSET_MASK) | + ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)); } -/* set TSO context descriptor - * support IP -> L4 and IP -> IP -> L4 - */ -static inline uint64_t -iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload) +static inline void +iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field, uint16_t value) { - uint64_t ctx_desc = 0; - uint32_t cd_cmd, hdr_len, cd_tso_len; - - if 
(!tx_offload.l4_len) { - PMD_TX_LOG(DEBUG, "L4 length set to 0"); - return ctx_desc; + *field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) & + IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK); } - hdr_len = tx_offload.l2_len + - tx_offload.l3_len + - tx_offload.l4_len; +static inline void +iavf_fill_data_desc(volatile struct iavf_tx_desc *desc, + struct rte_mbuf *m, uint64_t desc_template, + uint16_t tlen, uint16_t ipseclen) +{ + uint32_t hdrlen = m->l2_len; + uint32_t bufsz = 0; - cd_cmd = IAVF_TX_CTX_DESC_TSO; - cd_tso_len = mbuf->pkt_len - hdr_len; - ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | - ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT); + /* fill data descriptor qw1 from template */ + desc->cmd_type_offset_bsz = desc_template; - return ctx_desc; -} + /* set data buffer address */ + desc->buffer_addr = rte_mbuf_data_iova(m); -/* Construct the tx flags */ -static inline uint64_t -iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size, - uint32_t td_tag) -{ - return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA | - ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | - ((uint64_t)td_offset << - IAVF_TXD_QW1_OFFSET_SHIFT) | - ((uint64_t)size << - IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((uint64_t)td_tag << - IAVF_TXD_QW1_L2TAG1_SHIFT)); + /* calculate data buffer size less set header lengths */ + if ((m->ol_flags & PKT_TX_TUNNEL_MASK) && + (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) { + hdrlen += m->outer_l3_len; + if (m->ol_flags & PKT_TX_L4_MASK) + hdrlen += m->l3_len + m->l4_len; + else + hdrlen += m->l3_len; + if (m->ol_flags & PKT_TX_SEC_OFFLOAD) + hdrlen += ipseclen; + bufsz = hdrlen + tlen; + } else { + bufsz = m->data_len; + } + + /* set data buffer size */ + desc->cmd_type_offset_bsz |= + (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) & + IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK); + + desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr); + desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz); } + /* TX function */ uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - volatile struct iavf_tx_desc *txd; - volatile struct iavf_tx_desc *txr; - struct iavf_tx_queue *txq; - struct iavf_tx_entry *sw_ring; + struct iavf_tx_queue *txq = tx_queue; + volatile struct iavf_tx_desc *txr = txq->tx_ring; + struct iavf_tx_entry *txe_ring = txq->sw_ring; struct iavf_tx_entry *txe, *txn; - struct rte_mbuf *tx_pkt; - struct rte_mbuf *m_seg; - uint16_t tx_id; - uint16_t nb_tx; - uint32_t td_cmd; - uint32_t td_offset; - uint32_t td_tag; - uint64_t ol_flags; - uint16_t nb_used; - uint16_t nb_ctx; - uint16_t tx_last; - uint16_t slen; - uint64_t buf_dma_addr; - uint16_t cd_l2tag2 = 0; - union iavf_tx_offload tx_offload = {0}; - - txq = tx_queue; - sw_ring = txq->sw_ring; - txr = txq->tx_ring; - tx_id = txq->tx_tail; - txe = &sw_ring[tx_id]; + struct rte_mbuf *mb, *mb_seg; + uint16_t desc_idx, desc_idx_last; + uint16_t idx; + /* Check if the descriptor ring needs to be cleaned. 
*/ if (txq->nb_free < txq->free_thresh) - (void)iavf_xmit_cleanup(txq); + iavf_xmit_cleanup(txq); + + desc_idx = txq->tx_tail; + txe = &txe_ring[desc_idx]; + +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING + iavf_dump_tx_entry_ring(txq); + iavf_dump_tx_desc_ring(txq); +#endif + - for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { - td_cmd = 0; - td_tag = 0; - td_offset = 0; + for (idx = 0; idx < nb_pkts; idx++) { + volatile struct iavf_tx_desc *ddesc; + uint16_t nb_desc_ctx; + uint16_t nb_desc_data, nb_desc_required; + uint16_t tlen = 0, ipseclen = 0; + uint64_t ddesc_template = 0; + uint64_t ddesc_cmd = 0; + + mb = tx_pkts[idx]; - tx_pkt = *tx_pkts++; RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); - ol_flags = tx_pkt->ol_flags; - tx_offload.l2_len = tx_pkt->l2_len; - tx_offload.l3_len = tx_pkt->l3_len; - tx_offload.l4_len = tx_pkt->l4_len; - tx_offload.tso_segsz = tx_pkt->tso_segsz; - /* Calculate the number of context descriptors needed. */ - nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag); + nb_desc_data = mb->nb_segs; + nb_desc_ctx = !!(mb->ol_flags & + (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK)); - /* The number of descriptors that must be allocated for + /** + * The number of descriptors that must be allocated for * a packet equals to the number of the segments of that - * packet plus 1 context descriptor if needed. + * packet plus the context and ipsec descriptors if needed. */ - nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); - tx_last = (uint16_t)(tx_id + nb_used - 1); + nb_desc_required = nb_desc_data + nb_desc_ctx; + + desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1); - /* Circular ring */ - if (tx_last >= txq->nb_tx_desc) - tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + /* wrap descriptor ring */ + if (desc_idx_last >= txq->nb_tx_desc) + desc_idx_last = + (uint16_t)(desc_idx_last - txq->nb_tx_desc); - PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u" - " tx_first=%u tx_last=%u", - txq->port_id, txq->queue_id, tx_id, tx_last); + PMD_TX_LOG(DEBUG, + "port_id=%u queue_id=%u tx_first=%u tx_last=%u", + txq->port_id, txq->queue_id, desc_idx, desc_idx_last); - if (nb_used > txq->nb_free) { + if (nb_desc_required > txq->nb_free) { if (iavf_xmit_cleanup(txq)) { - if (nb_tx == 0) + if (idx == 0) return 0; goto end_of_tx; } - if (unlikely(nb_used > txq->rs_thresh)) { - while (nb_used > txq->nb_free) { + if (unlikely(nb_desc_required > txq->rs_thresh)) { + while (nb_desc_required > txq->nb_free) { if (iavf_xmit_cleanup(txq)) { - if (nb_tx == 0) + if (idx == 0) return 0; goto end_of_tx; } @@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } } - /* Descriptor based VLAN insertion */ - if (ol_flags & PKT_TX_VLAN_PKT && - txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) { - td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; - td_tag = tx_pkt->vlan_tci; - } - - /* According to datasheet, the bit2 is reserved and must be - * set to 1. 
- */ - td_cmd |= 0x04; - - /* Enable checksum offloading */ - if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK) - iavf_txd_enable_checksum(ol_flags, &td_cmd, - &td_offset, tx_offload); + iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb); - if (nb_ctx) { /* Setup TX context descriptor if required */ - uint64_t cd_type_cmd_tso_mss = - IAVF_TX_DESC_DTYPE_CONTEXT; - volatile struct iavf_tx_context_desc *ctx_txd = + if (nb_desc_ctx) { + volatile struct iavf_tx_context_desc *ctx_desc = (volatile struct iavf_tx_context_desc *) - &txr[tx_id]; + &txr[desc_idx]; /* clear QW0 or the previous writeback value * may impact next write */ - *(volatile uint64_t *)ctx_txd = 0; + *(volatile uint64_t *)ctx_desc = 0; - txn = &sw_ring[txe->next_id]; + txn = &txe_ring[txe->next_id]; RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf) { rte_pktmbuf_free_seg(txe->mbuf); txe->mbuf = NULL; } - /* TSO enabled */ - if (ol_flags & PKT_TX_TCP_SEG) - cd_type_cmd_tso_mss |= - iavf_set_tso_ctx(tx_pkt, tx_offload); + iavf_fill_context_desc(ctx_desc, mb, &tlen); + IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx); - if (ol_flags & PKT_TX_VLAN_PKT && - txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) { - cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 - << IAVF_TXD_CTX_QW1_CMD_SHIFT; - cd_l2tag2 = tx_pkt->vlan_tci; + txe->last_id = desc_idx_last; + desc_idx = txe->next_id; + txe = txn; } - ctx_txd->type_cmd_tso_mss = - rte_cpu_to_le_64(cd_type_cmd_tso_mss); - ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); - IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id); - txe->last_id = tx_last; - tx_id = txe->next_id; - txe = txn; - } - m_seg = tx_pkt; + mb_seg = mb; + do { - txd = &txr[tx_id]; - txn = &sw_ring[txe->next_id]; + ddesc = (volatile struct iavf_tx_desc *) + &txr[desc_idx]; + + txn = &txe_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); if (txe->mbuf) rte_pktmbuf_free_seg(txe->mbuf); - txe->mbuf = m_seg; - - /* Setup TX Descriptor */ - slen = m_seg->data_len; - buf_dma_addr = rte_mbuf_data_iova(m_seg); - txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); - txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd, - td_offset, - slen, - td_tag); - - IAVF_DUMP_TX_DESC(txq, txd, tx_id); - txe->last_id = tx_last; - tx_id = txe->next_id; + + txe->mbuf = mb_seg; + iavf_fill_data_desc(ddesc, mb_seg, + ddesc_template, tlen, ipseclen); + + IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx); + + txe->last_id = desc_idx_last; + desc_idx = txe->next_id; txe = txn; - m_seg = m_seg->next; - } while (m_seg); + mb_seg = mb_seg->next; + } while (mb_seg); /* The last packet data descriptor needs End Of Packet (EOP) */ - td_cmd |= IAVF_TX_DESC_CMD_EOP; - txq->nb_used = (uint16_t)(txq->nb_used + nb_used); - txq->nb_free = (uint16_t)(txq->nb_free - nb_used); + ddesc_cmd = IAVF_TX_DESC_CMD_EOP; + + txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required); + txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required); if (txq->nb_used >= txq->rs_thresh) { PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id=" "%4u (port=%d queue=%d)", - tx_last, txq->port_id, txq->queue_id); + desc_idx_last, txq->port_id, txq->queue_id); - td_cmd |= IAVF_TX_DESC_CMD_RS; + ddesc_cmd |= IAVF_TX_DESC_CMD_RS; /* Update txq RS bit counters */ txq->nb_used = 0; } - txd->cmd_type_offset_bsz |= - rte_cpu_to_le_64(((uint64_t)td_cmd) << - IAVF_TXD_QW1_CMD_SHIFT); - IAVF_DUMP_TX_DESC(txq, txd, tx_id); + ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd << + IAVF_TXD_DATA_QW1_CMD_SHIFT); + + IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1); } end_of_tx: rte_wmb(); PMD_TX_LOG(DEBUG, "port_id=%u 
queue_id=%u tx_tail=%u nb_tx=%u", - txq->port_id, txq->queue_id, tx_id, nb_tx); + txq->port_id, txq->queue_id, desc_idx, idx); - IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id); - txq->tx_tail = tx_id; + IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx); + txq->tx_tail = desc_idx; - return nb_tx; + return idx; } /* Check if the packet with vlan user priority is transmitted in the diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index f4ae2fd6e1..d05a525ef9 100644 --- a/drivers/net/iavf/iavf_rxtx.h +++ b/drivers/net/iavf/iavf_rxtx.h @@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits { IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ }; + +#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT (0) +#define IAVF_TXD_DATA_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT) + +#define IAVF_TXD_DATA_QW1_CMD_SHIFT (4) +#define IAVF_TXD_DATA_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT) + +#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT (16) +#define IAVF_TXD_DATA_QW1_OFFSET_MASK (0x3FFFFULL << \ + IAVF_TXD_DATA_QW1_OFFSET_SHIFT) + +#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT (IAVF_TXD_DATA_QW1_OFFSET_SHIFT) +#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK \ + (0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT) + +#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT \ + (IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) +#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK \ + (0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT) + +#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT \ + (IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) +#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK \ + (0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT) + +#define IAVF_TXD_DATA_QW1_MACLEN_MASK \ + (0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) +#define IAVF_TXD_DATA_QW1_IPLEN_MASK \ + (0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) +#define IAVF_TXD_DATA_QW1_L4LEN_MASK \ + (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) +#define IAVF_TXD_DATA_QW1_FCLEN_MASK \ + (0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) + +#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT (34) +#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK \ + (0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) + +#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT (48) +#define IAVF_TXD_DATA_QW1_L2TAG1_MASK \ + (0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT) + +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT (11) +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK \ + (0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT) + +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT (14) +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK \ + (0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT) + +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT (30) +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK \ + (0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT) + +#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT (30) +#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK \ + (0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT) + +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT (50) +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK \ + (0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT) + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT (0) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK (0x3UL) + +enum iavf_tx_ctx_desc_tunnel_external_ip_type { + IAVF_TX_CTX_DESC_EIPT_NONE, + IAVF_TX_CTX_DESC_EIPT_IPV6, + IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD, + IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD +}; + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT (2) +#define 
IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK (0x7FUL) + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT (9) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK (0x3UL) + +enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type { + IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE, + IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP, + IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE +}; + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT (11) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK (0x1UL) + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT (12) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK (0x7FUL) + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT (19) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK (0xFUL) + +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT (23) +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK (0x1UL) + +#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM (32) +#define IAVF_TXD_CTX_QW0_L2TAG2_MASK (0xFFFFUL) + + +#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK (0xFFFFF) + +/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */ +#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ + + /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */ #define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ @@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq, const volatile struct iavf_tx_desc *tx_desc = desc; enum iavf_tx_desc_dtype_value type; - type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64( - tx_desc->cmd_type_offset_bsz & - rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)); + + type = (enum iavf_tx_desc_dtype_value) + rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz & + rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK)); switch (type) { case IAVF_TX_DESC_DTYPE_DATA: name = "Tx_data_desc"; break; @@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq, } printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n", - txq->queue_id, name, tx_id, tx_desc->buffer_addr, - tx_desc->cmd_type_offset_bsz); + txq->queue_id, name, tx_id, tx_desc->buffer_addr, + tx_desc->cmd_type_offset_bsz); } #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \ diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c index edb54991e2..2c3bb0b05f 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c @@ -363,10 +363,12 @@ static inline void flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, const uint32_t *type_table) { - const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M, - 0, IAVF_RX_FLEX_DESC_PTYPE_M, - 0, IAVF_RX_FLEX_DESC_PTYPE_M, - 0, IAVF_RX_FLEX_DESC_PTYPE_M); + const __m128i ptype_mask = _mm_set_epi16( + IAVF_RX_FLEX_DESC_PTYPE_M, 0x0, + IAVF_RX_FLEX_DESC_PTYPE_M, 0x0, + IAVF_RX_FLEX_DESC_PTYPE_M, 0x0, + IAVF_RX_FLEX_DESC_PTYPE_M, 0x0); + __m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]); __m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]); __m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
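As a reading aid for the IAVF_TXD_DATA_QW1_* macros that this patch adds to iavf_rxtx.h above: quad-word 1 of a TX data descriptor packs the descriptor type, command bits, header-length offsets, buffer size and L2 tag into one little-endian 64-bit word. A hedged sketch of that composition follows (hypothetical helper; in the patch the same packing is split across iavf_build_data_desc_cmd_offset_fields() and iavf_fill_data_desc()):

#include <rte_byteorder.h>
#include "iavf_rxtx.h"

/* Illustrative only: assemble TX data descriptor QW1 from the new
 * shift/mask macros. IAVF_TX_DESC_DTYPE_DATA is 0x0, so the OR merely
 * documents the descriptor type.
 */
static inline uint64_t
example_build_data_qw1(uint64_t cmd, uint64_t offset,
		       uint16_t bufsz, uint16_t l2tag1)
{
	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
		((cmd << IAVF_TXD_DATA_QW1_CMD_SHIFT) &
		 IAVF_TXD_DATA_QW1_CMD_MASK) |
		((offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
		 IAVF_TXD_DATA_QW1_OFFSET_MASK) |
		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
		 IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK) |
		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
}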
From patchwork Tue Oct 19 09:23:40 2021
X-Patchwork-Submitter: Radu Nicolau
X-Patchwork-Id: 102155
From: Radu Nicolau
To: Jingjing Wu, Beilei Xing
Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau
Date: Tue, 19 Oct 2021 10:23:40 +0100
Message-Id: <20211019092344.1299368-4-radu.nicolau@intel.com>
In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v10 3/7] net/iavf: add support for asynchronous virt channel messages

Add support for asynchronous virtual channel messages, specifically for inline IPsec messages.

Signed-off-by: Declan Doherty Signed-off-by: Abhijit Sinha Signed-off-by: Radu Nicolau Acked-by: Jingjing Wu --- drivers/net/iavf/iavf.h | 16 ++++ drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++------------- 2 files changed, 101 insertions(+), 53 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 34bfa9af47..67051f29a8 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -193,6 +193,7 @@ struct iavf_info { uint64_t supported_rxdid; uint8_t *proto_xtr; /* proto xtr type for all queues */ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */ + rte_atomic32_t pend_cmd_count; int cmd_retval; /* return value of the cmd response from PF */ uint8_t *aq_resp; /* buffer to store the adminq response from PF */ @@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops) if (!ret) PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); + rte_atomic32_set(&vf->pend_cmd_count, 1); + return !ret; } +/* Check there is pending cmd in execution. If none, set new command. 
*/ +static inline int +_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops) +{ + int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops); + + if (!ret) + PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); + + rte_atomic32_set(&vf->pend_cmd_count, 2); + + return !ret; +} int iavf_check_api_version(struct iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 0f4dd21d44..da4654957a 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -24,8 +24,8 @@ #include "iavf.h" #include "iavf_rxtx.h" -#define MAX_TRY_TIMES 200 -#define ASQ_DELAY_MS 10 +#define MAX_TRY_TIMES 2000 +#define ASQ_DELAY_MS 1 static uint32_t iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed) @@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, } static int -iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args) +iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, + int async) { struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args) if (vf->vf_reset) return -EIO; - if (_atomic_set_cmd(vf, args->ops)) - return -1; + + if (async) { + if (_atomic_set_async_response_cmd(vf, args->ops)) + return -1; + } else { + if (_atomic_set_cmd(vf, args->ops)) + return -1; + } ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS, args->in_args, args->in_args_size, NULL); @@ -252,9 +259,11 @@ static void iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, uint16_t msglen) { + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = &adapter->vf; struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg; - struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); if (msglen < sizeof(struct virtchnl_pf_event)) { PMD_DRV_LOG(DEBUG, "Error event"); @@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev) case iavf_aqc_opc_send_msg_to_vf: if (msg_opc == VIRTCHNL_OP_EVENT) { iavf_handle_pf_event_msg(dev, info.msg_buf, - info.msg_len); + info.msg_len); } else { + /* check for inline IPsec events */ + struct inline_ipsec_msg *imsg = + (struct inline_ipsec_msg *)info.msg_buf; + struct rte_eth_event_ipsec_desc desc; + if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO + && imsg->ipsec_opcode == + INLINE_IPSEC_OP_EVENT) { + struct virtchnl_ipsec_event *ev = + imsg->ipsec_data.event; + desc.subtype = + RTE_ETH_EVENT_IPSEC_UNKNOWN; + desc.metadata = ev->ipsec_event_data; + rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_IPSEC, + &desc); + return; + } + /* read message and it's expected one */ - if (msg_opc == vf->pend_cmd) - _notify_cmd(vf, msg_ret); - else - PMD_DRV_LOG(ERR, "command mismatch," - "expect %u, get %u", - vf->pend_cmd, msg_opc); + if (msg_opc == vf->pend_cmd) { + rte_atomic32_dec(&vf->pend_cmd_count); + if (rte_atomic32_read( + &vf->pend_cmd_count) == 0) + _notify_cmd(vf, msg_ret); + } else { + PMD_DRV_LOG(ERR, + "command mismatch, expect %u, get %u", + vf->pend_cmd, msg_opc); + } PMD_DRV_LOG(DEBUG, - "adminq response is received," - " opcode = %d", msg_opc); + "adminq response is received, opcode = %d", + msg_opc); } break; default: @@ -365,7 +396,7 @@ 
iavf_enable_vlan_strip(struct iavf_adapter *adapter) args.in_args_size = 0; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) PMD_DRV_LOG(ERR, "Failed to execute command of" " OP_ENABLE_VLAN_STRIPPING"); @@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter) args.in_args_size = 0; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) PMD_DRV_LOG(ERR, "Failed to execute command of" " OP_DISABLE_VLAN_STRIPPING"); @@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION"); return err; @@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_CRC | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_LARGE_NUM_QPAIRS | - VIRTCHNL_VF_OFFLOAD_QOS; + VIRTCHNL_VF_OFFLOAD_QOS | + VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO; args.in_args = (uint8_t *)&caps; args.in_args_size = sizeof(caps); - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, @@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_GET_SUPPORTED_RXDIDS"); @@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable) args.in_args_size = sizeof(vlan_strip); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) PMD_DRV_LOG(ERR, "fail to execute command %s", enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" : @@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable) args.in_args_size = sizeof(vlan_insert); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) PMD_DRV_LOG(ERR, "fail to execute command %s", enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" : @@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add) args.in_args_size = sizeof(vlan_filter); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? 
"OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2"); @@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - ret = iavf_execute_vf_cmd(adapter, &args); + ret = iavf_execute_vf_cmd(adapter, &args, 0); if (ret) { PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"); @@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter) args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_ENABLE_QUEUES"); @@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter) args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_DISABLE_QUEUES"); @@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES"); @@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_ENABLE_QUEUES_V2"); @@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_DISABLE_QUEUES_V2"); @@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", on ? 
"OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); @@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_CONFIG_RSS_LUT"); @@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_CONFIG_RSS_KEY"); @@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); @@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP"); @@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num, args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); @@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? "OP_ADD_ETHER_ADDRESS" : @@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); *pstats = NULL; @@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, @@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); @@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? 
"OP_ADD_VLAN" : "OP_DEL_VLAN"); @@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER"); return err; @@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER"); return err; @@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "fail to check flow direcotor rule"); return err; @@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", @@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HENA_CAPS"); @@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HENA"); @@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter) args.in_args_size = 0; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, @@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" " VIRTCHNL_OP_CONFIG_TC_MAP"); @@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, i * sizeof(struct virtchnl_ether_addr); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args); + err = iavf_execute_vf_cmd(adapter, &args, 0); if (err) { PMD_DRV_LOG(ERR, "fail to execute command %s", @@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) * before iavf_read_msg_from_pf. 
*/
	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
	rte_intr_enable(&pci_dev->intr_handle);
 } else {
	rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
	rte_eal_alarm_set(IAVF_ALARM_INTERVAL, iavf_dev_alarm_handler, dev);
 }
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
		return err;

From patchwork Tue Oct 19 09:23:41 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Radu Nicolau
X-Patchwork-Id: 102156
X-Patchwork-Delegate: qi.z.zhang@intel.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 860A2A0C43; Tue, 19 Oct 2021 11:36:57 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B4B644111D; Tue, 19 Oct 2021 11:36:42 +0200 (CEST)
Received: from mga05.intel.com (mga05.intel.com [192.55.52.43]) by mails.dpdk.org (Postfix) with ESMTP id 9EAF540142 for ; Tue, 19 Oct 2021 11:36:39 +0200 (CEST)
X-IronPort-AV: E=McAfee;i="6200,9189,10141"; a="314669766"
X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="314669766"
Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 19 Oct 2021 02:36:39 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="661739877"
Received: from silpixa00400884.ir.intel.com ([10.243.22.82]) by orsmga005.jf.intel.com with ESMTP; 19 Oct 2021 02:36:36 -0700
From: Radu Nicolau
To: Jingjing Wu , Beilei Xing , Ray Kinsella
Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau
Date: Tue, 19 Oct 2021 10:23:41 +0100
Message-Id: <20211019092344.1299368-5-radu.nicolau@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com>
References: <20210909142428.750634-1-radu.nicolau@intel.com> <20211019092344.1299368-1-radu.nicolau@intel.com>
MIME-Version: 1.0
Subject: [dpdk-dev] [PATCH v10 4/7] net/iavf: add iAVF IPsec inline crypto support
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

Add support for inline crypto for IPsec, for ESP transport and tunnel over IPv4 and IPv6, as well as supporting the offload for ESP over UDP, in conjunction with TSO for UDP and TCP flows.

Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend offload support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request operations. IPsec Crypto requests receive an initial acknowledgment from the physical function driver confirming receipt of the request, and then an asynchronous response with the success/failure of the request, including any response data.
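For context on the rte_security integration described above, the following is a hedged sketch of how an application might populate the session parameters this PMD consumes. The field and enum names come from the standard DPDK rte_security/rte_crypto headers (and appear in this patch); the SPI, key, and salt values are hypothetical, and the tunnel endpoint addresses are omitted. The filled conf would then be passed to rte_security_session_create() on the context returned by rte_eth_dev_get_sec_ctx(port_id).

    #include <string.h>
    #include <rte_crypto.h>
    #include <rte_security.h>

    /* Hypothetical 128-bit AES-GCM key; a real application would
     * provision per-SA keying material. */
    static uint8_t aes_gcm_key[16];

    static void
    build_outbound_esp_tunnel_conf(struct rte_security_session_conf *conf,
                                   struct rte_crypto_sym_xform *aead)
    {
        memset(aead, 0, sizeof(*aead));
        aead->type = RTE_CRYPTO_SYM_XFORM_AEAD;
        aead->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
        aead->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
        aead->aead.key.data = aes_gcm_key;
        aead->aead.key.length = sizeof(aes_gcm_key);
        aead->aead.iv.length = 12;  /* assumed: 8B explicit IV + 4B salt */
        aead->aead.digest_length = 16;

        memset(conf, 0, sizeof(*conf));
        conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
        conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
        conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
        conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
        conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
        conf->ipsec.spi = 0x1000;        /* hypothetical SPI */
        conf->ipsec.salt = 0xdeadbeef;   /* hypothetical salt */
        conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
        /* tunnel.ipv4.src_ip/dst_ip omitted for brevity */
        conf->crypto_xform = aead;
    }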
Add enhanced descriptor debugging Refactor of scalar tx burst function to support integration of offload Signed-off-by: Declan Doherty Signed-off-by: Abhijit Sinha Signed-off-by: Radu Nicolau Reviewed-by: Jingjing Wu --- drivers/net/iavf/iavf.h | 10 + drivers/net/iavf/iavf_ethdev.c | 41 +- drivers/net/iavf/iavf_generic_flow.c | 15 + drivers/net/iavf/iavf_generic_flow.h | 2 + drivers/net/iavf/iavf_ipsec_crypto.c | 1894 +++++++++++++++++ drivers/net/iavf/iavf_ipsec_crypto.h | 160 ++ .../net/iavf/iavf_ipsec_crypto_capabilities.h | 383 ++++ drivers/net/iavf/iavf_rxtx.c | 202 +- drivers/net/iavf/iavf_rxtx.h | 93 +- drivers/net/iavf/iavf_vchnl.c | 29 + drivers/net/iavf/meson.build | 3 +- drivers/net/iavf/rte_pmd_iavf.h | 1 + drivers/net/iavf/version.map | 3 + 13 files changed, 2815 insertions(+), 21 deletions(-) create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 67051f29a8..e98c42ba08 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -221,6 +221,7 @@ struct iavf_info { rte_spinlock_t flow_ops_lock; struct iavf_parser_list rss_parser_list; struct iavf_parser_list dist_parser_list; + struct iavf_parser_list ipsec_crypto_parser_list; struct iavf_fdir_info fdir; /* flow director info */ /* indicate large VF support enabled or not */ @@ -245,6 +246,7 @@ enum iavf_proto_xtr_type { IAVF_PROTO_XTR_IPV6_FLOW, IAVF_PROTO_XTR_TCP, IAVF_PROTO_XTR_IP_OFFSET, + IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID, IAVF_PROTO_XTR_MAX, }; @@ -256,11 +258,14 @@ struct iavf_devargs { uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM]; }; +struct iavf_security_ctx; + /* Structure to store private data for each VF instance. 
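 * With this patch, the security_ctx member added below is expected to be
 * non-NULL only when the PF negotiates the
 * VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO capability (checked via
 * iavf_ipsec_crypto_supported() at device init).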
*/ struct iavf_adapter { struct iavf_hw hw; struct rte_eth_dev_data *dev_data; struct iavf_info vf; + struct iavf_security_ctx *security_ctx; bool rx_bulk_alloc_allowed; /* For vector PMD */ @@ -279,6 +284,8 @@ struct iavf_adapter { (&((struct iavf_adapter *)adapter)->vf) #define IAVF_DEV_PRIVATE_TO_HW(adapter) \ (&((struct iavf_adapter *)adapter)->hw) +#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \ + (((struct iavf_adapter *)adapter)->security_ctx) /* IAVF_VSI_TO */ #define IAVF_VSI_TO_HW(vsi) \ @@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, uint16_t size); void iavf_tm_conf_init(struct rte_eth_dev *dev); void iavf_tm_conf_uninit(struct rte_eth_dev *dev); +int iavf_ipsec_crypto_request(struct iavf_adapter *adapter, + uint8_t *msg, size_t msg_len, + uint8_t *resp_msg, size_t resp_msg_len); extern const struct rte_tm_ops iavf_tm_ops; #endif /* _IAVF_ETHDEV_H_ */ diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 611f1f7722..ac66e383a6 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -30,6 +30,7 @@ #include "iavf_rxtx.h" #include "iavf_generic_flow.h" #include "rte_pmd_iavf.h" +#include "iavf_ipsec_crypto.h" /* devargs */ #define IAVF_PROTO_XTR_ARG "proto_xtr" @@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = { [IAVF_PROTO_XTR_IP_OFFSET] = { .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" }, .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask }, + [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = { + .param = { + .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" }, + .ol_flag = + &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask }, }; static int iavf_dev_configure(struct rte_eth_dev *dev); @@ -939,6 +945,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); + /* free iAVF security device context all related resources */ + iavf_security_ctx_destroy(adapter); + adapter->stopped = 1; dev->data->dev_started = 0; @@ -948,7 +957,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) static int iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = &adapter->vf; dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV; @@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC; + if (iavf_ipsec_crypto_supported(adapter)) { + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; + } + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, @@ -1733,6 +1749,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name) { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW }, { "tcp", IAVF_PROTO_XTR_TCP }, { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET }, + { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID }, }; uint32_t i; @@ -1741,8 +1758,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name) return xtr_type_map[i].type; } - PMD_DRV_LOG(ERR, "wrong proto_xtr type, " - "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset"); + PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: " + 
"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said"); return -1; } @@ -2390,6 +2407,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) goto flow_init_err; } + /** Check if the IPsec Crypto offload is supported and create + * security_ctx if it is. + */ + if (iavf_ipsec_crypto_supported(adapter)) { + /* Initialize security_ctx only for primary process*/ + ret = iavf_security_ctx_create(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); + return ret; + } + + ret = iavf_security_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources"); + return ret; + } + } + iavf_default_rss_disable(adapter); return 0; diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c index b86d99e57d..8dfa549980 100644 --- a/drivers/net/iavf/iavf_generic_flow.c +++ b/drivers/net/iavf/iavf_generic_flow.c @@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad) TAILQ_INIT(&vf->flow_list); TAILQ_INIT(&vf->rss_parser_list); TAILQ_INIT(&vf->dist_parser_list); + TAILQ_INIT(&vf->ipsec_crypto_parser_list); rte_spinlock_init(&vf->flow_ops_lock); RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { @@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser, } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) { list = &vf->dist_parser_list; TAILQ_INSERT_HEAD(list, parser_node, node); + } else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) { + list = &vf->ipsec_crypto_parser_list; + TAILQ_INSERT_HEAD(list, parser_node, node); } else { return -EINVAL; } @@ -2018,6 +2022,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev, *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern, actions, error); + if (*engine) + return 0; + + *engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list, + pattern, actions, error); + if (*engine) + return 0; if (!*engine) { rte_flow_error_set(error, EINVAL, @@ -2064,6 +2075,10 @@ iavf_flow_create(struct rte_eth_dev *dev, return flow; } + /* Special case for inline crypto egress flows */ + if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY) + goto free_flow; + ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions, &engine, iavf_parse_engine_create, error); if (ret < 0) { diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h index 4794d1fb80..a471c0331f 100644 --- a/drivers/net/iavf/iavf_generic_flow.h +++ b/drivers/net/iavf/iavf_generic_flow.h @@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad, /* engine types. 
*/ enum iavf_flow_engine_type { IAVF_FLOW_ENGINE_NONE = 0, + IAVF_FLOW_ENGINE_IPSEC_CRYPTO, IAVF_FLOW_ENGINE_FDIR, IAVF_FLOW_ENGINE_HASH, IAVF_FLOW_ENGINE_MAX, @@ -462,6 +463,7 @@ enum iavf_flow_engine_type { */ enum iavf_flow_classification_stage { IAVF_FLOW_STAGE_NONE = 0, + IAVF_FLOW_STAGE_IPSEC_CRYPTO, IAVF_FLOW_STAGE_RSS, IAVF_FLOW_STAGE_DISTRIBUTOR, IAVF_FLOW_STAGE_MAX, diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c new file mode 100644 index 0000000000..b697e62579 --- /dev/null +++ b/drivers/net/iavf/iavf_ipsec_crypto.c @@ -0,0 +1,1894 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_rxtx.h" +#include "iavf_log.h" +#include "iavf_generic_flow.h" + +#include "iavf_ipsec_crypto.h" +#include "iavf_ipsec_crypto_capabilities.h" + +/** + * iAVF IPsec Crypto Security Context + */ +struct iavf_security_ctx { + struct iavf_adapter *adapter; + int pkt_md_offset; + struct rte_cryptodev_capabilities *crypto_capabilities; +}; + +/** + * iAVF IPsec Crypto Security Session Parameters + */ +struct iavf_security_session { + struct iavf_adapter *adapter; + + enum rte_security_ipsec_sa_mode mode; + enum rte_security_ipsec_tunnel_type type; + enum rte_security_ipsec_sa_direction direction; + + struct { + uint32_t spi; /* Security Parameter Index */ + uint32_t hw_idx; /* SA Index in hardware table */ + } sa; + + struct { + uint8_t enabled :1; + union { + uint64_t value; + struct { + uint32_t hi; + uint32_t low; + }; + }; + } esn; + + struct { + uint8_t enabled :1; + } udp_encap; + + size_t iv_sz; + size_t icv_sz; + size_t block_sz; + + struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template; +}; +/** + * IV Length field in IPsec Tx Desc uses the following encoding: + * + * 0B - 0 + * 4B - 1 + * 8B - 2 + * 16B - 3 + * + * but we also need the IV Length for TSO to correctly calculate the total + * header length so placing it in the upper 6-bits here for easier reterival. 
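+ *
+ * For example, iv_sz = 16 yields (16 << 2) | IAVF_IPSEC_IV_LEN_QDW = 0x43:
+ * the descriptor encoding (3) sits in the low two bits and the raw
+ * 16-byte IV size sits in the upper six bits.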
+ */ +static inline uint8_t +calc_ipsec_desc_iv_len_field(uint16_t iv_sz) +{ + uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE; + + switch (iv_sz) { + case 4: + iv_length = IAVF_IPSEC_IV_LEN_DW; + break; + case 8: + iv_length = IAVF_IPSEC_IV_LEN_DDW; + break; + case 16: + iv_length = IAVF_IPSEC_IV_LEN_QDW; + break; + } + + return (iv_sz << 2) | iv_length; +} + +static unsigned int +iavf_ipsec_crypto_session_size_get(void *device __rte_unused) +{ + return sizeof(struct iavf_security_session); +} + +static const struct rte_cryptodev_symmetric_capability * +get_capability(struct iavf_security_ctx *iavf_sctx, + uint32_t algo, uint32_t type) +{ + const struct rte_cryptodev_capabilities *capability; + int i = 0; + + capability = &iavf_sctx->crypto_capabilities[i]; + + while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) { + if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC && + capability->sym.xform_type == type && + capability->sym.cipher.algo == algo) + return &capability->sym; + /** try next capability */ + capability = &iavf_crypto_capabilities[i++]; + } + + return NULL; +} + +static const struct rte_cryptodev_symmetric_capability * +get_auth_capability(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_auth_algorithm algo) +{ + return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH); +} + +static const struct rte_cryptodev_symmetric_capability * +get_cipher_capability(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_cipher_algorithm algo) +{ + return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER); +} +static const struct rte_cryptodev_symmetric_capability * +get_aead_capability(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_aead_algorithm algo) +{ + return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD); +} + +static uint16_t +get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_cipher_algorithm algo) +{ + const struct rte_cryptodev_symmetric_capability *capability; + + capability = get_cipher_capability(iavf_sctx, algo); + if (capability == NULL) + return 0; + + return capability->cipher.block_size; +} + +static uint16_t +get_aead_blocksize(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_aead_algorithm algo) +{ + const struct rte_cryptodev_symmetric_capability *capability; + + capability = get_aead_capability(iavf_sctx, algo); + if (capability == NULL) + return 0; + + return capability->cipher.block_size; +} + +static uint16_t +get_auth_blocksize(struct iavf_security_ctx *iavf_sctx, + enum rte_crypto_auth_algorithm algo) +{ + const struct rte_cryptodev_symmetric_capability *capability; + + capability = get_auth_capability(iavf_sctx, algo); + if (capability == NULL) + return 0; + + return capability->auth.block_size; +} + +static uint8_t +calc_context_desc_cipherblock_sz(size_t len) +{ + switch (len) { + case 8: + return 0x2; + case 16: + return 0x3; + default: + return 0x0; + } +} + +static int +valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment) +{ + if (len < min || len > max) + return false; + + if (increment == 0) + return true; + + if ((len - min) % increment) + return false; + + /* make sure it fits in the key array */ + if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN) + return false; + + return true; +} + +static int +valid_auth_xform(struct iavf_security_ctx *iavf_sctx, + struct rte_crypto_auth_xform *auth) +{ + const struct rte_cryptodev_symmetric_capability *capability; + + capability = get_auth_capability(iavf_sctx, auth->algo); + if (capability == NULL) + return false; + + /* verify key 
size */
+	if (!valid_length(auth->key.length,
+			capability->auth.key_size.min,
+			capability->auth.key_size.max,
+			capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+			capability->cipher.key_size.min,
+			capability->cipher.key_size.max,
+			capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+			capability->aead.key_size.min,
+			capability->aead.key_size.max,
+			capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+			conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+			conf->ipsec.options.copy_flabel ||
+			conf->ipsec.options.copy_df ||
+			conf->ipsec.options.dec_ttl ||
+			conf->ipsec.options.ecn ||
+			conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xform parameters.
+	 *
+	 * AEAD transforms can be used for either inbound or outbound IPsec
+	 * SAs; for non-AEAD crypto transforms we explicitly support only
+	 * CIPHER/AUTH chained transforms for outbound and AUTH/CIPHER
+	 * chained transforms for inbound IPsec.
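+	 *
+	 * For example, an outbound (egress) chain is supplied as a cipher
+	 * xform whose ->next points at an auth xform, while an inbound
+	 * (ingress) chain is an auth xform whose ->next points at a
+	 * cipher xform.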
+ */ + if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) { + PMD_DRV_LOG(ERR, "Invalid IPsec option specified"); + return -EINVAL; + } + } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS && + conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + conf->crypto_xform->next && + conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + if (!valid_cipher_xform(iavf_sctx, + &conf->crypto_xform->cipher)) { + PMD_DRV_LOG(ERR, "Invalid IPsec option specified"); + return -EINVAL; + } + + if (!valid_auth_xform(iavf_sctx, + &conf->crypto_xform->next->auth)) { + PMD_DRV_LOG(ERR, "Invalid IPsec option specified"); + return -EINVAL; + } + } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && + conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + conf->crypto_xform->next && + conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) { + PMD_DRV_LOG(ERR, "Invalid IPsec option specified"); + return -EINVAL; + } + + if (!valid_cipher_xform(iavf_sctx, + &conf->crypto_xform->next->cipher)) { + PMD_DRV_LOG(ERR, "Invalid IPsec option specified"); + return -EINVAL; + } + } + + return 0; +} + +static void +sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg, + struct rte_crypto_aead_xform *aead, uint32_t salt) +{ + cfg->crypto_type = VIRTCHNL_AEAD; + + switch (aead->algo) { + case RTE_CRYPTO_AEAD_AES_CCM: + cfg->algo_type = VIRTCHNL_AES_CCM; break; + case RTE_CRYPTO_AEAD_AES_GCM: + cfg->algo_type = VIRTCHNL_AES_GCM; break; + case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: + cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break; + default: + PMD_DRV_LOG(ERR, "Invalid AEAD parameters"); + break; + } + + cfg->key_len = aead->key.length; + cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */ + cfg->digest_len = aead->digest_length; + cfg->salt = salt; + + memcpy(cfg->key_data, aead->key.data, cfg->key_len); +} + +static void +sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg, + struct rte_crypto_cipher_xform *cipher, uint32_t salt) +{ + cfg->crypto_type = VIRTCHNL_CIPHER; + + switch (cipher->algo) { + case RTE_CRYPTO_CIPHER_AES_CBC: + cfg->algo_type = VIRTCHNL_AES_CBC; break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + cfg->algo_type = VIRTCHNL_3DES_CBC; break; + case RTE_CRYPTO_CIPHER_NULL: + cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break; + case RTE_CRYPTO_CIPHER_AES_CTR: + cfg->algo_type = VIRTCHNL_AES_CTR; + cfg->salt = salt; + break; + default: + PMD_DRV_LOG(ERR, "Invalid cipher parameters"); + break; + } + + cfg->key_len = cipher->key.length; + cfg->iv_len = cipher->iv.length; + cfg->salt = salt; + + memcpy(cfg->key_data, cipher->key.data, cfg->key_len); +} + +static void +sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg, + struct rte_crypto_auth_xform *auth, uint32_t salt) +{ + cfg->crypto_type = VIRTCHNL_AUTH; + + switch (auth->algo) { + case RTE_CRYPTO_AUTH_NULL: + cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break; + case RTE_CRYPTO_AUTH_AES_CBC_MAC: + cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break; + case RTE_CRYPTO_AUTH_AES_CMAC: + cfg->algo_type = VIRTCHNL_AES_CMAC; break; + case RTE_CRYPTO_AUTH_AES_XCBC_MAC: + cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break; + case RTE_CRYPTO_AUTH_MD5_HMAC: + cfg->algo_type = VIRTCHNL_MD5_HMAC; break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + cfg->algo_type = VIRTCHNL_SHA1_HMAC; break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + cfg->algo_type = 
VIRTCHNL_SHA224_HMAC; break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + cfg->algo_type = VIRTCHNL_SHA256_HMAC; break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + cfg->algo_type = VIRTCHNL_SHA384_HMAC; break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + cfg->algo_type = VIRTCHNL_SHA512_HMAC; break; + case RTE_CRYPTO_AUTH_AES_GMAC: + cfg->algo_type = VIRTCHNL_AES_GMAC; + cfg->salt = salt; + break; + default: + PMD_DRV_LOG(ERR, "Invalid auth parameters"); + break; + } + + cfg->key_len = auth->key.length; + /* special case for RTE_CRYPTO_AUTH_AES_GMAC */ + if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) + cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */ + else + cfg->iv_len = auth->iv.length; + cfg->digest_len = auth->digest_length; + + memcpy(cfg->key_data, auth->key.data, cfg->key_len); +} + +/** + * Send SA add virtual channel request to Inline IPsec driver. + * + * Inline IPsec driver expects SPI and destination IP adderss to be in host + * order, but DPDK APIs are network order, therefore we need to do a htonl + * conversion of these parameters. + */ +static uint32_t +iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter, + struct rte_security_session_conf *conf) +{ + struct inline_ipsec_msg *request = NULL, *response = NULL; + struct virtchnl_ipsec_sa_cfg *sa_cfg; + size_t request_len, response_len; + + int rc; + + request_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sa_cfg); + + request = rte_malloc("iavf-sad-add-request", request_len, 0); + if (request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sa_cfg_resp); + response = rte_malloc("iavf-sad-add-response", response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE; + request->req_id = (uint16_t)0xDEADBEEF; + + /* set SA configuration params */ + sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1); + + sa_cfg->spi = conf->ipsec.spi; + sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP; + sa_cfg->virtchnl_direction = + conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ? 
+ VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS; + + if (conf->ipsec.options.esn) { + sa_cfg->esn_enabled = 1; + sa_cfg->esn_hi = conf->ipsec.esn.hi; + sa_cfg->esn_low = conf->ipsec.esn.low; + } + + if (conf->ipsec.options.udp_encap) + sa_cfg->udp_encap_enabled = 1; + + /* Set outer IP params */ + if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { + sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4; + + *((uint32_t *)sa_cfg->dst_addr) = + htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr); + } else { + uint32_t *v6_dst_addr = + conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32; + + sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6; + + ((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]); + ((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]); + ((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]); + ((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]); + } + + /* set crypto params */ + if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0], + &conf->crypto_xform->aead, conf->ipsec.salt); + + } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0], + &conf->crypto_xform->cipher, conf->ipsec.salt); + sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1], + &conf->crypto_xform->next->auth, conf->ipsec.salt); + + } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0], + &conf->crypto_xform->auth, conf->ipsec.salt); + if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) + sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1], + &conf->crypto_xform->next->cipher, conf->ipsec.salt); + } + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response id */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id) + rc = -EFAULT; + else + rc = response->ipsec_data.sa_cfg_resp->sa_handle; +update_cleanup: + rte_free(response); + rte_free(request); + + return rc; +} + +static void +set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template, + struct iavf_security_session *sess) +{ + template->sa_idx = sess->sa.hw_idx; + + if (sess->udp_encap.enabled) + template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT; + + if (sess->esn.enabled) + template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN; + + template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz); + template->ctx_desc_ipsec_params = + calc_context_desc_cipherblock_sz(sess->block_sz) | + ((uint8_t)(sess->icv_sz >> 2) << 3); +} + +static void +set_session_parameter(struct iavf_security_ctx *iavf_sctx, + struct iavf_security_session *sess, + struct rte_security_session_conf *conf, uint32_t sa_idx) +{ + sess->adapter = iavf_sctx->adapter; + + sess->mode = conf->ipsec.mode; + sess->direction = conf->ipsec.direction; + + if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) + sess->type = conf->ipsec.tunnel.type; + + sess->sa.spi = conf->ipsec.spi; + sess->sa.hw_idx = sa_idx; + + if (conf->ipsec.options.esn) { + sess->esn.enabled = 1; + sess->esn.value = conf->ipsec.esn.value; + } + + if (conf->ipsec.options.udp_encap) + sess->udp_encap.enabled = 1; + + if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + sess->block_sz = get_aead_blocksize(iavf_sctx, + conf->crypto_xform->aead.algo); + sess->iv_sz = 
sizeof(uint64_t); /* iv.length includes salt */ + sess->icv_sz = conf->crypto_xform->aead.digest_length; + } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + sess->block_sz = get_cipher_blocksize(iavf_sctx, + conf->crypto_xform->cipher.algo); + sess->iv_sz = conf->crypto_xform->cipher.iv.length; + sess->icv_sz = conf->crypto_xform->next->auth.digest_length; + } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { + sess->block_sz = get_auth_blocksize(iavf_sctx, + RTE_CRYPTO_SYM_XFORM_AUTH); + sess->iv_sz = conf->crypto_xform->auth.iv.length; + sess->icv_sz = conf->crypto_xform->auth.digest_length; + } else { + sess->block_sz = get_cipher_blocksize(iavf_sctx, + conf->crypto_xform->next->cipher.algo); + sess->iv_sz = + conf->crypto_xform->next->cipher.iv.length; + sess->icv_sz = conf->crypto_xform->auth.digest_length; + } + } + + set_pkt_metadata_template(&sess->pkt_metadata_template, sess); +} + +/** + * Create IPsec Security Association for inline IPsec Crypto offload. + * + * 1. validate session configuration parameters + * 2. allocate session memory from mempool + * 3. add SA to hardware database + * 4. set session parameters + * 5. create packet metadata template for datapath + */ +static int +iavf_ipsec_crypto_session_create(void *device, + struct rte_security_session_conf *conf, + struct rte_security_session *session, + struct rte_mempool *mempool) +{ + struct rte_eth_dev *ethdev = device; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private); + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + struct iavf_security_session *iavf_session = NULL; + int sa_idx; + int ret = 0; + + /* validate that all SA parameters are valid for device */ + ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf); + if (ret) + return ret; + + /* allocate session context */ + if (rte_mempool_get(mempool, (void **)&iavf_session)) { + PMD_DRV_LOG(ERR, "Cannot get object from sess mempool"); + return -ENOMEM; + } + + /* add SA to hardware database */ + sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf); + if (sa_idx < 0) { + PMD_DRV_LOG(ERR, + "Failed to add SA (spi: %d, mode: %s, direction: %s)", + conf->ipsec.spi, + conf->ipsec.mode == + RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ? + "transport" : "tunnel", + conf->ipsec.direction == + RTE_SECURITY_IPSEC_SA_DIR_INGRESS ? + "inbound" : "outbound"); + + rte_mempool_put(mempool, iavf_session); + return -EFAULT; + } + + /* save data plane required session parameters */ + set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx); + + /* save to security session private data */ + set_sec_session_private_data(session, iavf_session); + + return 0; +} + +/** + * Check if valid ipsec crypto action. + * SPI must be non-zero and SPI in session must match SPI value + * passed into function. 
+ * + * returns: 0 if invalid session or SPI value equal zero + * returns: 1 if valid + */ +uint32_t +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev, + const struct rte_security_session *session, uint32_t spi) +{ + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private); + struct iavf_security_session *sess = session->sess_private_data; + + /* verify we have a valid session and that it belong to this adapter */ + if (unlikely(sess == NULL || sess->adapter != adapter)) + return false; + + /* SPI value must be non-zero */ + if (spi == 0) + return false; + /* Session SPI must patch flow SPI*/ + else if (sess->sa.spi == spi) { + return true; + /** + * TODO: We should add a way of tracking valid hw SA indices to + * make validation less brittle + */ + } + + return true; +} + +/** + * Send virtual channel security policy add request to IES driver. + * + * IES driver expects SPI and destination IP adderss to be in host + * order, but DPDK APIs are network order, therefore we need to do a htonl + * conversion of these parameters. + */ +int +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, + uint32_t esp_spi, + uint8_t is_v4, + rte_be32_t v4_dst_addr, + uint8_t *v6_dst_addr, + uint8_t drop) +{ + struct inline_ipsec_msg *request = NULL, *response = NULL; + size_t request_len, response_len; + int rc = 0; + + request_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sp_cfg); + request = rte_malloc("iavf-inbound-security-policy-add-request", + request_len, 0); + if (request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE; + request->req_id = (uint16_t)0xDEADBEEF; + + /* ESP SPI */ + request->ipsec_data.sp_cfg->spi = htonl(esp_spi); + + /* Destination IP */ + if (is_v4) { + request->ipsec_data.sp_cfg->table_id = + VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4; + request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr); + } else { + request->ipsec_data.sp_cfg->table_id = + VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6; + request->ipsec_data.sp_cfg->dip[0] = + htonl(((uint32_t *)v6_dst_addr)[0]); + request->ipsec_data.sp_cfg->dip[1] = + htonl(((uint32_t *)v6_dst_addr)[1]); + request->ipsec_data.sp_cfg->dip[2] = + htonl(((uint32_t *)v6_dst_addr)[2]); + request->ipsec_data.sp_cfg->dip[3] = + htonl(((uint32_t *)v6_dst_addr)[3]); + } + + request->ipsec_data.sp_cfg->drop = drop; + + /** Traffic Class/Congestion Domain currently not support */ + request->ipsec_data.sp_cfg->set_tc = 0; + request->ipsec_data.sp_cfg->cgd = 0; + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sp_cfg_resp); + response = rte_malloc("iavf-inbound-security-policy-add-response", + response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id) + rc = -EFAULT; + else + rc = response->ipsec_data.sp_cfg_resp->rule_id; + +update_cleanup: + rte_free(request); + rte_free(response); + + return rc; +} + +static uint32_t +iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter, + struct iavf_security_session *sess) +{ + struct inline_ipsec_msg *request = NULL, *response = NULL; + size_t 
request_len, response_len; + int rc = 0; + + request_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sa_update); + request = rte_malloc("iavf-sa-update-request", request_len, 0); + if (request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_resp); + response = rte_malloc("iavf-sa-update-response", response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE; + request->req_id = (uint16_t)0xDEADBEEF; + + /* set request params */ + request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx; + request->ipsec_data.sa_update->esn_hi = sess->esn.hi; + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id) + rc = -EFAULT; + else + rc = response->ipsec_data.ipsec_resp->resp; + +update_cleanup: + rte_free(request); + rte_free(response); + + return rc; +} + +static int +iavf_ipsec_crypto_session_update(void *device, + struct rte_security_session *session, + struct rte_security_session_conf *conf) +{ + struct iavf_adapter *adapter = NULL; + struct iavf_security_session *iavf_sess = NULL; + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; + int rc = 0; + + adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); + iavf_sess = (struct iavf_security_session *)session->sess_private_data; + + /* verify we have a valid session and that it belong to this adapter */ + if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter)) + return -EINVAL; + + /* update esn hi 32-bits */ + if (iavf_sess->esn.enabled && conf->ipsec.options.esn) { + /** + * Update ESN in hardware for inbound SA. Store in + * iavf_security_session for outbound SA for use + * in *iavf_ipsec_crypto_pkt_metadata_set* function. + */ + if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) + rc = iavf_ipsec_crypto_sa_update_esn(adapter, + iavf_sess); + else + iavf_sess->esn.hi = conf->ipsec.esn.hi; + } + + return rc; +} + +static int +iavf_ipsec_crypto_session_stats_get(void *device __rte_unused, + struct rte_security_session *session __rte_unused, + struct rte_security_stats *stats __rte_unused) +{ + return -EOPNOTSUPP; +} + +int +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter, + uint8_t is_v4, uint32_t flow_id) +{ + struct inline_ipsec_msg *request = NULL, *response = NULL; + size_t request_len, response_len; + int rc = 0; + + request_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sp_destroy); + request = rte_malloc("iavf-sp-del-request", request_len, 0); + if (request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_resp); + response = rte_malloc("iavf-sp-del-response", response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY; + request->req_id = (uint16_t)0xDEADBEEF; + + /* set security policy params */ + request->ipsec_data.sp_destroy->table_id = is_v4 ? 
+ VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 : + VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6; + request->ipsec_data.sp_destroy->rule_id = flow_id; + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id) + rc = -EFAULT; + else + return response->ipsec_data.ipsec_status->status; + +update_cleanup: + rte_free(request); + rte_free(response); + + return rc; +} + +static uint32_t +iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter, + struct iavf_security_session *sess) +{ + struct inline_ipsec_msg *request = NULL, *response = NULL; + size_t request_len, response_len; + + int rc = 0; + + request_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_sa_destroy); + + request = rte_malloc("iavf-sa-del-request", request_len, 0); + if (request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_resp); + + response = rte_malloc("iavf-sa-del-response", response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY; + request->req_id = (uint16_t)0xDEADBEEF; + + /** + * SA delete supports deletetion of 1-8 specified SA's or if the flag + * field is zero, all SA's associated with VF will be deleted. + */ + if (sess) { + request->ipsec_data.sa_destroy->flag = 0x1; + request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx; + } else { + request->ipsec_data.sa_destroy->flag = 0x0; + } + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id) + rc = -EFAULT; + + /** + * Delete status will be the same bitmask as sa_destroy request flag if + * deletes successful + */ + if (request->ipsec_data.sa_destroy->flag != + response->ipsec_data.ipsec_status->status) + rc = -EFAULT; + +update_cleanup: + rte_free(response); + rte_free(request); + + return rc; +} + +static int +iavf_ipsec_crypto_session_destroy(void *device, + struct rte_security_session *session) +{ + struct iavf_adapter *adapter = NULL; + struct iavf_security_session *iavf_sess = NULL; + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; + int ret; + + adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); + iavf_sess = (struct iavf_security_session *)session->sess_private_data; + + /* verify we have a valid session and that it belong to this adapter */ + if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter)) + return -EINVAL; + + ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess); + rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess); + return ret; +} + +/** + * Get ESP trailer from packet as well as calculate the total ESP trailer + * length, which include padding, ESP trailer footer and the ICV + */ +static inline struct rte_esp_tail * +iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m, + struct iavf_security_session *s, uint16_t *esp_trailer_length) +{ + struct rte_esp_tail *esp_trailer; + + uint16_t length = sizeof(struct rte_esp_tail) + 
s->icv_sz; + uint16_t offset = 0; + + /** + * The ICV will not be present in TSO packets as this is appended by + * hardware during segment generation + */ + if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) + length -= s->icv_sz; + + *esp_trailer_length = length; + + /** + * Calculate offset in packet to ESP trailer header, this should be + * total packet length less the size of the ESP trailer plus the ICV + * length if it is present + */ + offset = rte_pktmbuf_pkt_len(m) - length; + + if (m->nb_segs > 1) { + /* find segment which esp trailer is located */ + while (m->data_len < offset) { + offset -= m->data_len; + m = m->next; + } + } + + esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset); + + *esp_trailer_length += esp_trailer->pad_len; + + return esp_trailer; +} + +static inline uint16_t +iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m, + struct iavf_security_session *s, uint16_t esp_tlen) +{ + uint16_t ol2_len = m->l2_len; /* MAC + VLAN */ + uint16_t ol3_len = 0; /* ipv4/6 + ext hdrs */ + uint16_t ol4_len = 0; /* UDP NATT */ + uint16_t l3_len = 0; /* IPv4/6 + ext hdrs */ + uint16_t l4_len = 0; /* TCP/UDP/STCP hdrs */ + uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz; + + if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) + ol3_len = m->outer_l3_len; + /**< + * application provided l3len assumed to include length of + * ipv4/6 hdr + ext hdrs + */ + + if (s->udp_encap.enabled) + ol4_len = sizeof(struct rte_udp_hdr); + + l3_len = m->l3_len; + l4_len = m->l4_len; + + return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len + + esp_hlen + l3_len + l4_len + esp_tlen); +} + +static int +iavf_ipsec_crypto_pkt_metadata_set(void *device, + struct rte_security_session *session, + struct rte_mbuf *m, void *params) +{ + struct rte_eth_dev *ethdev = device; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private); + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + struct iavf_security_session *iavf_sess = session->sess_private_data; + struct iavf_ipsec_crypto_pkt_metadata *md; + struct rte_esp_tail *esp_tail; + uint64_t *sqn = params; + uint16_t esp_trailer_length; + + /* Check we have valid session and is associated with this device */ + if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter)) + return -EINVAL; + + /* Get dynamic metadata location from mbuf */ + md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset, + struct iavf_ipsec_crypto_pkt_metadata *); + + /* Set immutatable metadata values from session template */ + memcpy(md, &iavf_sess->pkt_metadata_template, + sizeof(struct iavf_ipsec_crypto_pkt_metadata)); + + esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess, + &esp_trailer_length); + + /* Set per packet mutable metadata values */ + md->esp_trailer_len = esp_trailer_length; + md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m, + iavf_sess, esp_trailer_length); + md->next_proto = esp_tail->next_proto; + + /* If Extended SN in use set the upper 32-bits in metadata */ + if (iavf_sess->esn.enabled && sqn != NULL) + md->esn = (uint32_t)(*sqn >> 32); + + return 0; +} + +static int +iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter, + struct virtchnl_ipsec_cap *capability) +{ + /* Perform pf-vf comms */ + struct inline_ipsec_msg *request = NULL, *response = NULL; + size_t request_len, response_len; + int rc; + + request_len = sizeof(struct inline_ipsec_msg); + + request = rte_malloc("iavf-device-capability-request", request_len, 0); + if 
(request == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + response_len = sizeof(struct inline_ipsec_msg) + + sizeof(struct virtchnl_ipsec_cap); + response = rte_malloc("iavf-device-capability-response", + response_len, 0); + if (response == NULL) { + rc = -ENOMEM; + goto update_cleanup; + } + + /* set msg header params */ + request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP; + request->req_id = (uint16_t)0xDEADBEEF; + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, + (uint8_t *)request, request_len, + (uint8_t *)response, response_len); + if (rc) + goto update_cleanup; + + /* verify response id */ + if (response->ipsec_opcode != request->ipsec_opcode || + response->req_id != request->req_id){ + rc = -EFAULT; + goto update_cleanup; + } + memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability)); + +update_cleanup: + rte_free(response); + rte_free(request); + + return rc; +} + +enum rte_crypto_auth_algorithm auth_maptbl[] = { + /* Hash Algorithm */ + [VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL, + [VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC, + [VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC, + [VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC, + [VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC, + [VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC, + [VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC, + [VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC, + [VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC, + [VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC, + [VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC, + [VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC, + [VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC, + [VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC, + [VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC, +}; + +static void +update_auth_capabilities(struct rte_cryptodev_capabilities *scap, + struct virtchnl_algo_cap *acap) +{ + struct rte_cryptodev_symmetric_capability *capability = &scap->sym; + + scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + + capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH; + + capability->auth.algo = auth_maptbl[acap->algo_type]; + capability->auth.block_size = acap->block_size; + + capability->auth.key_size.min = acap->min_key_size; + capability->auth.key_size.max = acap->max_key_size; + capability->auth.key_size.increment = acap->inc_key_size; + + capability->auth.digest_size.min = acap->min_digest_size; + capability->auth.digest_size.max = acap->max_digest_size; + capability->auth.digest_size.increment = acap->inc_digest_size; +} + +enum rte_crypto_cipher_algorithm cipher_maptbl[] = { + /* Cipher Algorithm */ + [VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL, + [VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC, + [VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC, + [VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR, +}; + +static void +update_cipher_capabilities(struct rte_cryptodev_capabilities *scap, + struct virtchnl_algo_cap *acap) +{ + struct rte_cryptodev_symmetric_capability *capability = &scap->sym; + + scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + + capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER; + + capability->cipher.algo = cipher_maptbl[acap->algo_type]; + + capability->cipher.block_size = acap->block_size; + + capability->cipher.key_size.min = acap->min_key_size; + capability->cipher.key_size.max = acap->max_key_size; + capability->cipher.key_size.increment = acap->inc_key_size; + + capability->cipher.iv_size.min = 
acap->min_iv_size; + capability->cipher.iv_size.max = acap->max_iv_size; + capability->cipher.iv_size.increment = acap->inc_iv_size; +} + +enum rte_crypto_aead_algorithm aead_maptbl[] = { + /* AEAD Algorithm */ + [VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM, + [VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM, + [VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305, +}; + +static void +update_aead_capabilities(struct rte_cryptodev_capabilities *scap, + struct virtchnl_algo_cap *acap) +{ + struct rte_cryptodev_symmetric_capability *capability = &scap->sym; + + scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + + capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD; + + capability->aead.algo = aead_maptbl[acap->algo_type]; + + capability->aead.block_size = acap->block_size; + + capability->aead.key_size.min = acap->min_key_size; + capability->aead.key_size.max = acap->max_key_size; + capability->aead.key_size.increment = acap->inc_key_size; + + capability->aead.aad_size.min = acap->min_aad_size; + capability->aead.aad_size.max = acap->max_aad_size; + capability->aead.aad_size.increment = acap->inc_aad_size; + + capability->aead.iv_size.min = acap->min_iv_size; + capability->aead.iv_size.max = acap->max_iv_size; + capability->aead.iv_size.increment = acap->inc_iv_size; + + capability->aead.digest_size.min = acap->min_digest_size; + capability->aead.digest_size.max = acap->max_digest_size; + capability->aead.digest_size.increment = acap->inc_digest_size; +} + +/** + * Dynamically set crypto capabilities based on the virtchnl IPsec + * capabilities structure. + */ +int +iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx + *iavf_sctx, struct virtchnl_ipsec_cap *vch_cap) +{ + struct rte_cryptodev_capabilities *capabilities; + int i, j, number_of_capabilities = 0, ci = 0; + + /* Count the total number of crypto algorithms supported */ + for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) + number_of_capabilities += vch_cap->cap[i].algo_cap_num; + + /** + * Allocate cryptodev capabilities structure for + * *number_of_capabilities* items plus one item to NULL-terminate the + * array + */ + capabilities = rte_zmalloc("crypto_cap", + sizeof(struct rte_cryptodev_capabilities) * + (number_of_capabilities + 1), 0); + if (capabilities == NULL) + return -ENOMEM; + capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED; + + /** + * Iterate over each virtchnl crypto capability by crypto type and + * algorithm.
+ */ + for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) { + for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) { + switch (vch_cap->cap[i].crypto_type) { + case VIRTCHNL_AUTH: + update_auth_capabilities(&capabilities[ci], + &vch_cap->cap[i].algo_cap_list[j]); + break; + case VIRTCHNL_CIPHER: + update_cipher_capabilities(&capabilities[ci], + &vch_cap->cap[i].algo_cap_list[j]); + break; + case VIRTCHNL_AEAD: + update_aead_capabilities(&capabilities[ci], + &vch_cap->cap[i].algo_cap_list[j]); + break; + default: + capabilities[ci].op = + RTE_CRYPTO_OP_TYPE_UNDEFINED; + break; + } + } + } + + iavf_sctx->crypto_capabilities = capabilities; + return 0; +} + +/** + * Get security capabilities for device + */ +static const struct rte_security_capability * +iavf_ipsec_crypto_capabilities_get(void *device) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + unsigned int i; + + static struct rte_security_capability iavf_security_capabilities[] = { + { /* IPsec Inline Crypto ESP Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { .udp_encap = 1, + .stats = 1, .esn = 1 }, + }, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { .udp_encap = 1, + .stats = 1, .esn = 1 }, + }, + .ol_flags = 0 + }, + { /* IPsec Inline Crypto ESP Transport Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS, + .options = { .udp_encap = 1, .stats = 1, + .esn = 1 }, + }, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Crypto ESP Transport Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { .udp_encap = 1, .stats = 1, + .esn = 1 } + }, + .ol_flags = 0 + }, + { + .action = RTE_SECURITY_ACTION_TYPE_NONE + } + }; + + /** + * Update the security capabilities struct with the runtime-discovered + * crypto capabilities, except for the last element of the array, + * which is the NULL termination + */ + for (i = 0; i < ((sizeof(iavf_security_capabilities) / + sizeof(iavf_security_capabilities[0])) - 1); i++) { + iavf_security_capabilities[i].crypto_capabilities = + iavf_sctx->crypto_capabilities; + } + + return iavf_security_capabilities; +} + +static struct rte_security_ops iavf_ipsec_crypto_ops = { + .session_get_size = iavf_ipsec_crypto_session_size_get, + .session_create = iavf_ipsec_crypto_session_create, + .session_update = iavf_ipsec_crypto_session_update, + .session_stats_get = iavf_ipsec_crypto_session_stats_get, + .session_destroy = iavf_ipsec_crypto_session_destroy, +
.set_pkt_metadata = iavf_ipsec_crypto_pkt_metadata_set, + .get_userdata = NULL, + .capabilities_get = iavf_ipsec_crypto_capabilities_get, +}; + +int +iavf_security_ctx_create(struct iavf_adapter *adapter) +{ + struct rte_security_ctx *sctx; + + sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0); + if (sctx == NULL) + return -ENOMEM; + + sctx->device = adapter->vf.eth_dev; + sctx->ops = &iavf_ipsec_crypto_ops; + sctx->sess_cnt = 0; + + adapter->vf.eth_dev->security_ctx = sctx; + + if (adapter->security_ctx == NULL) { + adapter->security_ctx = rte_malloc("iavf_security_ctx", + sizeof(struct iavf_security_ctx), 0); + if (adapter->security_ctx == NULL) + return -ENOMEM; + } + + return 0; +} + +int +iavf_security_init(struct iavf_adapter *adapter) +{ + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + struct rte_mbuf_dynfield pkt_md_dynfield = { + .name = "iavf_ipsec_crypto_pkt_metadata", + .size = sizeof(struct iavf_ipsec_crypto_pkt_metadata), + .align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata) + }; + struct virtchnl_ipsec_cap capabilities; + int rc; + + iavf_sctx->adapter = adapter; + + iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield); + if (iavf_sctx->pkt_md_offset < 0) + return iavf_sctx->pkt_md_offset; + + /* Get device capabilities from Inline IPsec driver over PF-VF comms */ + rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities); + if (rc) + return rc; + + return iavf_ipsec_crypto_set_security_capabililites(iavf_sctx, + &capabilities); +} + +int +iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter) +{ + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + + return iavf_sctx->pkt_md_offset; +} + +int +iavf_security_ctx_destroy(struct iavf_adapter *adapter) +{ + struct rte_security_ctx *sctx = adapter->vf.eth_dev->security_ctx; + struct iavf_security_ctx *iavf_sctx = adapter->security_ctx; + + if (iavf_sctx == NULL) + return -ENODEV; + + /* TODO: Add resources cleanup */ + + /* free and reset security data structures */ + rte_free(iavf_sctx); + rte_free(sctx); + + iavf_sctx = NULL; + sctx = NULL; + + return 0; +} + +int +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter) +{ + struct virtchnl_vf_resource *resources = adapter->vf.vf_res; + + /** Capability check for IPsec Crypto */ + if (resources && (resources->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)) + return true; + + return false; +} + +#define IAVF_IPSEC_INSET_ESP (\ + IAVF_INSET_ESP_SPI) + +#define IAVF_IPSEC_INSET_AH (\ + IAVF_INSET_AH_SPI) + +#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_ESP_SPI) + +#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_ESP_SPI) + +enum iavf_ipsec_flow_pt_type { + IAVF_PATTERN_ESP = 1, + IAVF_PATTERN_AH, + IAVF_PATTERN_UDP_ESP, +}; +enum iavf_ipsec_flow_pt_ip_ver { + IAVF_PATTERN_IPV4 = 1, + IAVF_PATTERN_IPV6, +}; + +#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4))) +#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F) +#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4) + +static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = { + {iavf_pattern_eth_ipv4_esp, IAVF_IPSEC_INSET_ESP, + IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)}, + {iavf_pattern_eth_ipv6_esp, IAVF_IPSEC_INSET_ESP, + IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)}, + {iavf_pattern_eth_ipv4_ah, IAVF_IPSEC_INSET_AH, + IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)}, + {iavf_pattern_eth_ipv6_ah, 
IAVF_IPSEC_INSET_AH, + IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)}, + {iavf_pattern_eth_ipv4_udp_esp, IAVF_IPSEC_INSET_IPV4_NATT_ESP, + IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)}, + {iavf_pattern_eth_ipv6_udp_esp, IAVF_IPSEC_INSET_IPV6_NATT_ESP, + IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)}, +}; + +struct iavf_ipsec_flow_item { + uint64_t id; + uint8_t is_ipv4; + uint32_t spi; + struct rte_ether_hdr eth_hdr; + union { + struct rte_ipv4_hdr ipv4_hdr; + struct rte_ipv6_hdr ipv6_hdr; + }; + struct rte_udp_hdr udp_hdr; +}; + +static void +parse_eth_item(const struct rte_flow_item_eth *item, + struct rte_ether_hdr *eth) +{ + memcpy(eth->src_addr.addr_bytes, + item->src.addr_bytes, sizeof(eth->src_addr)); + memcpy(eth->dst_addr.addr_bytes, + item->dst.addr_bytes, sizeof(eth->dst_addr)); +} + +static void +parse_ipv4_item(const struct rte_flow_item_ipv4 *item, + struct rte_ipv4_hdr *ipv4) +{ + ipv4->src_addr = item->hdr.src_addr; + ipv4->dst_addr = item->hdr.dst_addr; +} + +static void +parse_ipv6_item(const struct rte_flow_item_ipv6 *item, + struct rte_ipv6_hdr *ipv6) +{ + memcpy(ipv6->src_addr, item->hdr.src_addr, 16); + memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16); +} + +static void +parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp) +{ + udp->dst_port = item->hdr.dst_port; + udp->src_port = item->hdr.src_port; +} + +static int +has_security_action(const struct rte_flow_action actions[], + const void **session) +{ + /* only {SECURITY; END} supported */ + if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY && + actions[1].type == RTE_FLOW_ACTION_TYPE_END) { + *session = actions[0].conf; + return true; + } + return false; +} + +static struct iavf_ipsec_flow_item * +iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + uint32_t type) +{ + const void *session; + struct iavf_ipsec_flow_item + *ipsec_flow = rte_malloc("security-flow-rule", + sizeof(struct iavf_ipsec_flow_item), 0); + enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type); + enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type); + + if (ipsec_flow == NULL) + return NULL; + + ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4); + + if (pattern[0].spec) + parse_eth_item((const struct rte_flow_item_eth *) + pattern[0].spec, &ipsec_flow->eth_hdr); + + switch (p_type) { + case IAVF_PATTERN_ESP: + if (ipsec_flow->is_ipv4) { + parse_ipv4_item((const struct rte_flow_item_ipv4 *) + pattern[1].spec, + &ipsec_flow->ipv4_hdr); + } else { + parse_ipv6_item((const struct rte_flow_item_ipv6 *) + pattern[1].spec, + &ipsec_flow->ipv6_hdr); + } + ipsec_flow->spi = + ((const struct rte_flow_item_esp *) + pattern[2].spec)->hdr.spi; + break; + case IAVF_PATTERN_AH: + if (ipsec_flow->is_ipv4) { + parse_ipv4_item((const struct rte_flow_item_ipv4 *) + pattern[1].spec, + &ipsec_flow->ipv4_hdr); + } else { + parse_ipv6_item((const struct rte_flow_item_ipv6 *) + pattern[1].spec, + &ipsec_flow->ipv6_hdr); + } + ipsec_flow->spi = + ((const struct rte_flow_item_ah *) + pattern[2].spec)->spi; + break; + case IAVF_PATTERN_UDP_ESP: + if (ipsec_flow->is_ipv4) { + parse_ipv4_item((const struct rte_flow_item_ipv4 *) + pattern[1].spec, + &ipsec_flow->ipv4_hdr); + } else { + parse_ipv6_item((const struct rte_flow_item_ipv6 *) + pattern[1].spec, + &ipsec_flow->ipv6_hdr); + } + parse_udp_item((const struct rte_flow_item_udp *) + pattern[2].spec, + &ipsec_flow->udp_hdr); + ipsec_flow->spi = + ((const struct 
rte_flow_item_esp *) + pattern[3].spec)->hdr.spi; + break; + default: + goto flow_cleanup; + } + + if (!has_security_action(actions, &session)) + goto flow_cleanup; + + if (!iavf_ipsec_crypto_action_valid(ethdev, session, + ipsec_flow->spi)) + goto flow_cleanup; + + return ipsec_flow; + +flow_cleanup: + rte_free(ipsec_flow); + return NULL; +} + + +static struct iavf_flow_parser iavf_ipsec_flow_parser; + +static int +iavf_ipsec_flow_init(struct iavf_adapter *ad) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_parser *parser; + + if (!vf->vf_res) + return -EINVAL; + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO) + parser = &iavf_ipsec_flow_parser; + else + return -ENOTSUP; + + return iavf_register_parser(parser, ad); +} + +static void +iavf_ipsec_flow_uninit(struct iavf_adapter *ad) +{ + iavf_unregister_parser(&iavf_ipsec_flow_parser, ad); +} + +static int +iavf_ipsec_flow_create(struct iavf_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct iavf_ipsec_flow_item *ipsec_flow = meta; + if (!ipsec_flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "NULL rule."); + return -rte_errno; + } + + if (ipsec_flow->is_ipv4) { + ipsec_flow->id = + iavf_ipsec_crypto_inbound_security_policy_add(ad, + ipsec_flow->spi, + 1, + ipsec_flow->ipv4_hdr.dst_addr, + NULL, + 0); + } else { + ipsec_flow->id = + iavf_ipsec_crypto_inbound_security_policy_add(ad, + ipsec_flow->spi, + 0, + 0, + ipsec_flow->ipv6_hdr.dst_addr, + 0); + } + + if (ipsec_flow->id < 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to add SA."); + return -rte_errno; + } + + flow->rule = ipsec_flow; + + return 0; +} + +static int +iavf_ipsec_flow_destroy(struct iavf_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct iavf_ipsec_flow_item *ipsec_flow = flow->rule; + if (!ipsec_flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "NULL rule."); + return -rte_errno; + } + + iavf_ipsec_crypto_security_policy_delete(ad, + ipsec_flow->is_ipv4, ipsec_flow->id); + rte_free(ipsec_flow); + return 0; +} + +static struct iavf_flow_engine iavf_ipsec_flow_engine = { + .init = iavf_ipsec_flow_init, + .uninit = iavf_ipsec_flow_uninit, + .create = iavf_ipsec_flow_create, + .destroy = iavf_ipsec_flow_destroy, + .type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO, +}; + +static int +iavf_ipsec_flow_parse(struct iavf_adapter *ad, + struct iavf_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct iavf_pattern_match_item *item = NULL; + int ret = -1; + + item = iavf_search_pattern_match_item(pattern, array, array_len, error); + if (item && item->meta) { + uint32_t type = (uint64_t)(item->meta); + struct iavf_ipsec_flow_item *fi = + iavf_ipsec_flow_item_parse(ad->vf.eth_dev, + pattern, actions, type); + if (fi && meta) { + *meta = fi; + ret = 0; + } + } + return ret; +} + +static struct iavf_flow_parser iavf_ipsec_flow_parser = { + .engine = &iavf_ipsec_flow_engine, + .array = iavf_ipsec_flow_pattern, + .array_len = RTE_DIM(iavf_ipsec_flow_pattern), + .parse_pattern_action = iavf_ipsec_flow_parse, + .stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO, +}; + +RTE_INIT(iavf_ipsec_flow_engine_register) +{ + iavf_register_flow_engine(&iavf_ipsec_flow_engine); +} diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h 
b/drivers/net/iavf/iavf_ipsec_crypto.h new file mode 100644 index 0000000000..4e4c8798ec --- /dev/null +++ b/drivers/net/iavf/iavf_ipsec_crypto.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _IAVF_IPSEC_CRYPTO_H_ +#define _IAVF_IPSEC_CRYPTO_H_ + +#include + +#include "iavf.h" + + + +struct iavf_tx_ipsec_desc { + union { + struct { + __le64 qw0; + __le64 qw1; + }; + struct { + __le16 l4payload_length; + __le32 esn; + __le16 trailer_length; + u8 type:4; + u8 rsv:1; + u8 udp:1; + u8 ivlen:2; + u8 next_header; + __le16 ipv6_ext_hdr_length; + __le32 said; + } __rte_packed; + }; +} __rte_packed; + +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT 0 +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK (0x3FFFULL << \ + IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT 16 +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK (0xFFFFFFFFULL << \ + IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT 48 +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK (0x3FULL << \ + IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT 5 +#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK (0x1ULL << \ + IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT 6 +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK (0x3ULL << \ + IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT 8 +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK (0xFFULL << \ + IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT 16 +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK (0xFFULL << \ + IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT) + +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT 32 +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK (0xFFFFFULL << \ + IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) + +/* Initialization Vector Length type */ +enum iavf_ipsec_iv_len { + IAVF_IPSEC_IV_LEN_NONE, /* No IV */ + IAVF_IPSEC_IV_LEN_DW, /* 4B IV */ + IAVF_IPSEC_IV_LEN_DDW, /* 8B IV */ + IAVF_IPSEC_IV_LEN_QDW, /* 16B IV */ +}; + + +/* IPsec Crypto Packet Metadata offload flags */ +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN (0x1 << 0) +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN (0x1 << 1) +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS (0x1 << 2) +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT (0x1 << 3) + +/** + * Packet metadata structure used to hold the parameters required by the + * iAVF transmit data path. Parameters are set for each packet by calling + * the rte_security_set_pkt_metadata() API.
+ */ +struct iavf_ipsec_crypto_pkt_metadata { + uint32_t sa_idx; /* SA hardware index (20b/4B) */ + + uint8_t ol_flags; /* flags (1B) */ + uint8_t len_iv; /* IV length (2b/1B) */ + uint8_t ctx_desc_ipsec_params; /* IPsec params for ctx desc (7b/1B) */ + uint8_t esp_trailer_len; /* ESP trailer length (6b/1B) */ + + uint16_t l4_payload_len; /* L4 payload length */ + uint8_t ipv6_ext_hdrs_len; /* IPv6 extension headers len (5b/1B) */ + uint8_t next_proto; /* Next Protocol (8b/1B) */ + + uint32_t esn; /* Extended Sequence Number (32b/4B) */ +} __rte_packed; + +/** + * Check if inline IPsec Crypto offload is supported + */ +int +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter); + +/** + * Create security context + */ +int iavf_security_ctx_create(struct iavf_adapter *adapter); + +/** + * Initialize security context + */ +int iavf_security_init(struct iavf_adapter *adapter); + +/** + * Set security capabilities + */ +int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx + *iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities); + + +int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter); + +/** + * Destroy security context + */ +int iavf_security_ctx_destroy(struct iavf_adapter *adapter); + +/** + * Verify that the inline IPsec Crypto action is valid for this device + */ +uint32_t +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev, + const struct rte_security_session *session, uint32_t spi); + +/** + * Add inbound security policy rule to hardware + */ +int +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, + uint32_t esp_spi, + uint8_t is_v4, + rte_be32_t v4_dst_addr, + uint8_t *v6_dst_addr, + uint8_t drop); + +/** + * Delete inbound security policy rule from hardware + */ +int +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter, + uint8_t is_v4, uint32_t flow_id); + +#endif /* _IAVF_IPSEC_CRYPTO_H_ */ diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h new file mode 100644 index 0000000000..70ce8dd638 --- /dev/null +++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ +#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ + +static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = { + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 20, + .max = 20, + .increment = 0 + }, + .iv_size = { 0 } + }, } + }, } + }, + { /* SHA256 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + .iv_size = { 0 } + }, } + }, } + }, + { /* SHA384 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + .iv_size = { 0 } + }, } + }, } +
}, + { /* SHA512 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + .iv_size = { 0 } + }, } + }, } + }, + { /* MD5 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { 0 } + }, } + }, } + }, + { /* AES XCBC MAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { 0 }, + .iv_size = { 0 } + }, } + }, } + }, + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .aad_size = { + .min = 0, + .max = 240, + .increment = 1 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + }, + }, } + }, } + }, + { /* ChaCha20-Poly1305 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305, + .block_size = 16, + .key_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .aad_size = { + .min = 0, + .max = 240, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + }, + }, } + }, } + }, + { /* AES CCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_CCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .aad_size = { + .min = 0, + .max = 240, + .increment = 1 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + }, + }, } + }, } + }, + { /* AES GMAC (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES CMAC (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_CMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 8, + .max = 16, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 
16, + .increment = 0 + } + }, } + }, } + }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { /* NULL (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .iv_size = { 0 } + }, }, + }, }, + }, + { /* NULL (CIPHER) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_NULL, + .block_size = 1, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .iv_size = { + .min = 0, + .max = 0, + .increment = 0 + } + }, }, + }, } + }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { + .op = RTE_CRYPTO_OP_TYPE_UNDEFINED, + } +}; + + +#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */ diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index 11b7fea36f..28cc834caf 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -27,6 +27,7 @@ #include "iavf.h" #include "iavf_rxtx.h" +#include "iavf_ipsec_crypto.h" #include "rte_pmd_iavf.h" /* Offset of mbuf dynamic field for protocol extraction's metadata */ @@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask; uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t flex_type) @@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type) [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW, [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP, [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET, + [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = + IAVF_RXDID_COMMS_IPSEC_CRYPTO, }; return flex_type < RTE_DIM(rxdid_map) ? 
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid) rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_aux_v2; break; + case IAVF_RXDID_COMMS_IPSEC_CRYPTO: + rxq->xtr_ol_flag = + rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v2; + break; case IAVF_RXDID_COMMS_OVS_1: rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs; break; @@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf) { struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_tx_queue *txq; @@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } - if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { + if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { struct virtchnl_vlan_supported_caps *insertion_support = - &vf->vlan_v2_caps.offloads.insertion_support; + &adapter->vf.vlan_v2_caps.offloads.insertion_support; uint32_t insertion_cap; if (insertion_support->outer) @@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->offloads = offloads; txq->tx_deferred_start = tx_conf->tx_deferred_start; + if (iavf_ipsec_crypto_supported(adapter)) + txq->ipsec_crypto_pkt_md_offset = + iavf_security_get_pkt_md_offset(adapter); + /* Allocate software ring */ txq->sw_ring = rte_zmalloc_socket("iavf tx sw ring", @@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb, #endif } +static inline void +iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp) +{ + volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc = + (volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp; + + mb->dynfield1[0] = desc->ipsec_said & + IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK; + } + +static inline void +iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp, + struct iavf_ipsec_crypto_stats *stats) +{ + uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1); + + if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) { + uint16_t ipsec_status; + + mb->ol_flags |= PKT_RX_SEC_OFFLOAD; + + ipsec_status = status1 & + IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK; + + + if (unlikely(ipsec_status != + IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) { + mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED; + + switch (ipsec_status) { + case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS: + stats->ierrors.sad_miss++; + break; + case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED: + stats->ierrors.not_processed++; + break; + case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL: + stats->ierrors.icv_check++; + break; + case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR: + stats->ierrors.ipsec_length++; + break; + case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR: + stats->ierrors.misc++; + break; +} + + stats->ierrors.count++; + return; + } + + stats->icount++; + stats->ibytes += rxdp->wb.pkt_len & 0x3FFF; + + if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO && + ipsec_status != + IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS) + iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp); + } +} + + /* Translate the rx descriptor status and error fields to pkt flags */ static inline uint64_t iavf_rxd_to_pkt_flags(uint64_t qword) @@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, rxm->packet_type = 
ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(rxm, &rxd); + iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd, + &rxq->stats.ipsec_crypto); rxq->rxd_to_pkt_fields(rxq, rxm, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); rxm->ol_flags |= pkt_flags; @@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); + iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd, + &rxq->stats.ipsec_crypto); rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); @@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); + iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j], + &rxq->stats.ipsec_crypto); rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]); stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); @@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m) *field |= cmd; } +static inline void +iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field, + struct iavf_ipsec_crypto_pkt_metadata *ipsec_md) +{ + uint64_t ipsec_field = + (uint64_t)ipsec_md->ctx_desc_ipsec_params << + IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT; + + *field |= ipsec_field; +} + + static inline void iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0, const struct rte_mbuf *m) @@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0, static inline uint16_t iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field, - struct rte_mbuf *m) + struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md) { uint64_t segmentation_field = 0; uint64_t total_length = 0; - total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len); + if (m->ol_flags & PKT_TX_SEC_OFFLOAD) { + total_length = ipsec_md->l4_payload_len; + } else { + total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len); - if (m->ol_flags & PKT_TX_TUNNEL_MASK) - total_length -= m->outer_l3_len; + if (m->ol_flags & PKT_TX_TUNNEL_MASK) + total_length -= m->outer_l3_len; + } #ifdef RTE_LIBRTE_IAVF_DEBUG_TX if (!m->l4_len || !m->tso_segsz) @@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws { static inline void iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc, - struct rte_mbuf *m, uint16_t *tlen) + struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md, + uint16_t *tlen) { volatile struct iavf_tx_context_desc_qws *desc_qws = (volatile struct iavf_tx_context_desc_qws *)desc; @@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc, /* fill segmentation field */ if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) { + /* fill IPsec field */ + if (m->ol_flags & PKT_TX_SEC_OFFLOAD) + iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1, + ipsec_md); + *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1, - m); + m, ipsec_md); } /* fill tunnelling field */ @@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc, } +static inline void +iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc, + const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len) 
+{ + desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len << + IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) | + ((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) | + ((uint64_t)md->esp_trailer_len << + IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)); + + desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx << + IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) | + ((uint64_t)md->next_proto << + IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) | + ((uint64_t)(md->len_iv & 0x3) << + IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) | + ((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ? + 1ULL : 0ULL) << + IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) | + (uint64_t)IAVF_TX_DESC_DTYPE_IPSEC); + + /** + * TODO: Pre-calculate this in the Session initialization + * + * Calculate IPsec length required in data descriptor func when TSO + * offload is enabled + */ + *ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) + + (md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ? + sizeof(struct rte_udp_hdr) : 0); +} + static inline void iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, struct rte_mbuf *m) @@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc, } +static struct iavf_ipsec_crypto_pkt_metadata * +iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq, + struct rte_mbuf *m) +{ + if (m->ol_flags & PKT_TX_SEC_OFFLOAD) + return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset, + struct iavf_ipsec_crypto_pkt_metadata *); + + return NULL; +} + /* TX function */ uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) for (idx = 0; idx < nb_pkts; idx++) { volatile struct iavf_tx_desc *ddesc; - uint16_t nb_desc_ctx; + struct iavf_ipsec_crypto_pkt_metadata *ipsec_md; + + uint16_t nb_desc_ctx, nb_desc_ipsec; uint16_t nb_desc_data, nb_desc_required; uint16_t tlen = 0, ipseclen = 0; uint64_t ddesc_template = 0; @@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + /** + * Get metadata for ipsec crypto from mbuf dynamic fields if + * security offload is specified. + */ + ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb); + nb_desc_data = mb->nb_segs; nb_desc_ctx = !!(mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK)); + nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD); /** * The number of descriptors that must be allocated for * a packet equals to the number of the segments of that * packet plus the context and ipsec descriptors if needed. 
*/ - nb_desc_required = nb_desc_data + nb_desc_ctx; + nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec; desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1); @@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txe->mbuf = NULL; } - iavf_fill_context_desc(ctx_desc, mb, &tlen); + iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen); IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx); txe->last_id = desc_idx_last; @@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) txe = txn; } + if (nb_desc_ipsec) { + volatile struct iavf_tx_ipsec_desc *ipsec_desc = + (volatile struct iavf_tx_ipsec_desc *) + &txr[desc_idx]; + + txn = &txe_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen); + + IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx); + + txe->last_id = desc_idx_last; + desc_idx = txe->next_id; + txe = txn; + } mb_seg = mb; diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index d05a525ef9..500ffb2d06 100644 --- a/drivers/net/iavf/iavf_rxtx.h +++ b/drivers/net/iavf/iavf_rxtx.h @@ -25,7 +25,8 @@ #define IAVF_TX_NO_VECTOR_FLAGS ( \ DEV_TX_OFFLOAD_MULTI_SEGS | \ - DEV_TX_OFFLOAD_TCP_TSO) + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_SECURITY) #define IAVF_TX_VECTOR_OFFLOAD ( \ DEV_TX_OFFLOAD_VLAN_INSERT | \ @@ -47,7 +48,7 @@ #define DEFAULT_TX_RS_THRESH 32 #define DEFAULT_TX_FREE_THRESH 32 -#define IAVF_MIN_TSO_MSS 88 +#define IAVF_MIN_TSO_MSS 256 #define IAVF_MAX_TSO_MSS 9668 #define IAVF_TSO_MAX_SEG UINT8_MAX #define IAVF_TX_MAX_MTU_SEG 8 @@ -65,7 +66,8 @@ PKT_TX_VLAN_PKT | \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ - PKT_TX_TCP_SEG) + PKT_TX_TCP_SEG | \ + PKT_TX_SEC_OFFLOAD) #define IAVF_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) @@ -163,6 +165,24 @@ struct iavf_txq_ops { void (*release_mbufs)(struct iavf_tx_queue *txq); }; +struct iavf_ipsec_crypto_stats { + uint64_t icount; + uint64_t ibytes; + struct { + uint64_t count; + uint64_t sad_miss; + uint64_t not_processed; + uint64_t icv_check; + uint64_t ipsec_length; + uint64_t misc; + } ierrors; +}; + +struct iavf_rx_queue_stats { + uint64_t reserved; + struct iavf_ipsec_crypto_stats ipsec_crypto; +}; + /* Structure associated with each Rx queue.
*/ struct iavf_rx_queue { struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ @@ -211,6 +231,7 @@ struct iavf_rx_queue { /* flexible descriptor metadata extraction offload flag */ iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle flexible descriptor by RXDID */ + struct iavf_rx_queue_stats stats; uint64_t offloads; }; @@ -245,6 +266,7 @@ struct iavf_tx_queue { uint64_t offloads; uint16_t next_dd; /* next to set RS, for VPMD */ uint16_t next_rs; /* next to check DD, for VPMD */ + uint16_t ipsec_crypto_pkt_md_offset; bool q_set; /* if rx queue has been configured */ bool tx_deferred_start; /* don't start this queue in dev start */ @@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs { } flex_ts; }; +/* Rx Flex Descriptor + * RxDID Profile ID 24 Inline IPsec + * Flex-field 0: RSS hash lower 16-bits + * Flex-field 1: RSS hash upper 16-bits + * Flex-field 2: Flow ID lower 16-bits + * Flex-field 3: Flow ID upper 16-bits + * Flex-field 4: Inline IPsec SAID lower 16-bits + * Flex-field 5: Inline IPsec SAID upper 16-bits + */ +struct iavf_32b_rx_flex_desc_comms_ipsec { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flexi_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le32 flow_id; + __le32 ipsec_said; +}; + /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed @@ -366,6 +422,7 @@ enum iavf_rxdid { IAVF_RXDID_COMMS_AUX_TCP = 21, IAVF_RXDID_COMMS_OVS_1 = 22, IAVF_RXDID_COMMS_OVS_2 = 23, + IAVF_RXDID_COMMS_IPSEC_CRYPTO = 24, IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25, IAVF_RXDID_LAST = 63, }; @@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits { enum iavf_rx_flex_desc_status_error_1_bits { /* Note: These are predefined bit offsets */ - IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */ - IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4, - IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5, + /* Bits 3:0 are reserved for inline ipsec status */ + IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0, + IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1, + IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2, + IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3, + IAVF_RX_FLEX_DESC_STATUS1_NAT_S, + IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED, /* [10:6] reserved */ IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11, IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, @@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits { IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/ }; +#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK ( \ + BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) | \ + BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) | \ + BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) | \ + BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3)) + +enum iavf_rx_flex_desc_ipsec_crypto_status { + IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0, + IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS, + IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED, + IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL, + IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR, + /* Reserved */ + IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF +}; + + #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT (0) #define IAVF_TXD_DATA_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT) @@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq, case IAVF_TX_DESC_DTYPE_CONTEXT: name = "Tx_context_desc"; break; + case IAVF_TX_DESC_DTYPE_IPSEC: + name = "Tx_IPsec_desc"; + break; default: name = "unknown_desc"; break; diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index da4654957a..4827313ee7 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) return 0; } + + + +int +iavf_ipsec_crypto_request(struct iavf_adapter *adapter, + uint8_t *msg, size_t msg_len, + uint8_t *resp_msg, size_t resp_msg_len) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); + struct iavf_cmd_info args; + int err; + + args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO; + args.in_args = msg; + args.in_args_size = msg_len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + + err = iavf_execute_vf_cmd(adapter, &args, 1); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + "OP_INLINE_IPSEC_CRYPTO"); + return err; + } + + memcpy(resp_msg, args.out_buffer, resp_msg_len); + + return 0; +} diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build index 36a82e3faa..5eb230f687 100644 --- a/drivers/net/iavf/meson.build +++ b/drivers/net/iavf/meson.build @@ -5,7 +5,7 @@ cflags += ['-Wno-strict-aliasing'] includes += include_directories('../../common/iavf') -deps += ['common_iavf'] +deps += ['common_iavf', 'security', 'cryptodev'] sources = files( 'iavf_ethdev.c', @@ -15,6 +15,7 @@ sources = files( 'iavf_fdir.c', 'iavf_hash.c', 'iavf_tm.c', + 'iavf_ipsec_crypto.c', ) if arch_subdir == 'x86' diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h index 3a045040f1..7426eb9be3 100644 --- a/drivers/net/iavf/rte_pmd_iavf.h +++ b/drivers/net/iavf/rte_pmd_iavf.h @@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask; /** * The mbuf dynamic field pointer for flexible descriptor's extraction metadata. 
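For reference, a minimal sketch (not part of the patch) of how an application could consume the new SAID extraction flag on the receive side. It assumes the proto_xtr devarg is configured to extract the inline IPsec SAID, and it uses the rte_pmd_ifd_dynf_proto_xtr_metadata_avail() and rte_pmd_ifd_dynfield_proto_xtr_metadata_get() helpers that rte_pmd_iavf.h already provides for the other extraction types:

#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_pmd_iavf.h>

/* Print the inline IPsec SAID extracted into the mbuf dynamic field;
 * the PMD sets the dynflag only on packets that carried a SAID.
 */
static inline void
print_rx_ipsec_said(struct rte_mbuf *m)
{
	/* the dynamic field is only registered when proto_xtr is enabled */
	if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
		return;

	if (m->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask)
		printf("inline IPsec SAID: %u\n",
			rte_pmd_ifd_dynfield_proto_xtr_metadata_get(m));
}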
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map index f3efe756cf..97f0f87311 100644 --- a/drivers/net/iavf/version.map +++ b/drivers/net/iavf/version.map @@ -13,4 +13,7 @@ EXPERIMENTAL { rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; + + # added in 21.11 + rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask; }; From patchwork Tue Oct 19 09:23:42 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Radu Nicolau X-Patchwork-Id: 102157 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 401D9A0C43; Tue, 19 Oct 2021 11:37:07 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1C7E441125; Tue, 19 Oct 2021 11:36:45 +0200 (CEST) Received: from mga05.intel.com (mga05.intel.com [192.55.52.43]) by mails.dpdk.org (Postfix) with ESMTP id 8D2F44111A for ; Tue, 19 Oct 2021 11:36:41 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10141"; a="314669770" X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="314669770" Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 19 Oct 2021 02:36:41 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="661739920" Received: from silpixa00400884.ir.intel.com ([10.243.22.82]) by orsmga005.jf.intel.com with ESMTP; 19 Oct 2021 02:36:39 -0700 From: Radu Nicolau To: Jingjing Wu , Beilei Xing Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau Date: Tue, 19 Oct 2021 10:23:42 +0100 Message-Id: <20211019092344.1299368-6-radu.nicolau@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com> References: <20210909142428.750634-1-radu.nicolau@intel.com> <20211019092344.1299368-1-radu.nicolau@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v10 5/7] net/iavf: add xstats support for inline IPsec crypto X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add per-queue counters for maintaining statistics for the inline IPsec crypto offload. The statistics can be retrieved through rte_security_session_stats_get(), with more detailed error counters available through the rte_ethdev xstats API.
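For reference, a minimal sketch (not part of the patch) of how an application could read the new counters through the generic ethdev xstats API; the error handling is illustrative, and the name filter matches the "inline_ipsec_crypto_" prefix of the names added by this patch:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

/* Dump the inline IPsec crypto xstats of a configured iavf port. */
static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int n, i;

	/* first call with NULL returns the number of available xstats */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = malloc(sizeof(*names) * n);
	xstats = malloc(sizeof(*xstats) * n);
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
			rte_eth_xstats_get(port_id, xstats, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
				names[i].name, xstats[i].value);
out:
	free(names);
	free(xstats);
}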
Signed-off-by: Declan Doherty Signed-off-by: Radu Nicolau Acked-by: Jingjing Wu --- drivers/net/iavf/iavf.h | 21 ++++++++- drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------ drivers/net/iavf/iavf_rxtx.h | 12 ----- 3 files changed, 89 insertions(+), 28 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index e98c42ba08..90a7344bd5 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -96,6 +96,25 @@ struct iavf_adapter; struct iavf_rx_queue; struct iavf_tx_queue; + +struct iavf_ipsec_crypto_stats { + uint64_t icount; + uint64_t ibytes; + struct { + uint64_t count; + uint64_t sad_miss; + uint64_t not_processed; + uint64_t icv_check; + uint64_t ipsec_length; + uint64_t misc; + } ierrors; +}; + +struct iavf_eth_xstats { + struct virtchnl_eth_stats eth_stats; + struct iavf_ipsec_crypto_stats ips_stats; +}; + /* Structure that defines a VSI, associated with a adapter. */ struct iavf_vsi { struct iavf_adapter *adapter; /* Backreference to associated adapter */ @@ -105,7 +124,7 @@ struct iavf_vsi { uint16_t max_macaddrs; /* Maximum number of MAC addresses */ uint16_t base_vector; uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ - struct virtchnl_eth_stats eth_stats_offset; + struct iavf_eth_xstats eth_stats_offset; }; struct rte_flow; diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index ac66e383a6..25476965ab 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev); static int iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int iavf_dev_stats_reset(struct rte_eth_dev *dev); +static int iavf_dev_xstats_reset(struct rte_eth_dev *dev); static int iavf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n); static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev, @@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off { unsigned int offset; }; +#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a) static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = { - {"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)}, - {"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)}, - {"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)}, - {"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)}, - {"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)}, + {"rx_bytes", _OFF_OF(eth_stats.rx_bytes)}, + {"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)}, + {"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)}, + {"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)}, + {"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)}, {"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats, rx_unknown_protocol)}, - {"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)}, - {"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)}, - {"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)}, - {"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)}, - {"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)}, - {"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)}, + {"tx_bytes", _OFF_OF(eth_stats.tx_bytes)}, + {"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)}, + {"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)}, + {"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)}, + 
{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)}, + {"tx_error_packets", _OFF_OF(eth_stats.tx_errors)}, + + {"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)}, + {"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)}, + {"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)}, + {"inline_ipsec_crypto_ierrors_sad_lookup", + _OFF_OF(ips_stats.ierrors.sad_miss)}, + {"inline_ipsec_crypto_ierrors_not_processed", + _OFF_OF(ips_stats.ierrors.not_processed)}, + {"inline_ipsec_crypto_ierrors_icv_fail", + _OFF_OF(ips_stats.ierrors.icv_check)}, + {"inline_ipsec_crypto_ierrors_length", + _OFF_OF(ips_stats.ierrors.ipsec_length)}, + {"inline_ipsec_crypto_ierrors_misc", + _OFF_OF(ips_stats.ierrors.misc)}, }; +#undef _OFF_OF #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \ sizeof(rte_iavf_stats_strings[0])) @@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = { .stats_reset = iavf_dev_stats_reset, .xstats_get = iavf_dev_xstats_get, .xstats_get_names = iavf_dev_xstats_get_names, - .xstats_reset = iavf_dev_stats_reset, + .xstats_reset = iavf_dev_xstats_reset, .promiscuous_enable = iavf_dev_promiscuous_enable, .promiscuous_disable = iavf_dev_promiscuous_disable, .allmulticast_enable = iavf_dev_allmulticast_enable, @@ -1544,7 +1561,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat) static void iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes) { - struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset; + struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats; iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes); iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast); @@ -1606,7 +1623,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev) return ret; /* set stats offset base on current values */ - vsi->eth_stats_offset = *pstats; + vsi->eth_stats_offset.eth_stats = *pstats; + + return 0; +} + +static int +iavf_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + iavf_dev_stats_reset(dev); + memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats)); return 0; } @@ -1626,6 +1654,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, return IAVF_NB_XSTATS; } +static void +iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev, + struct iavf_ipsec_crypto_stats *ips) +{ + uint16_t idx; + for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) { + struct iavf_rx_queue *rxq; + struct iavf_ipsec_crypto_stats *stats; + rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx]; + stats = &rxq->stats.ipsec_crypto; + ips->icount += stats->icount; + ips->ibytes += stats->ibytes; + ips->ierrors.count += stats->ierrors.count; + ips->ierrors.sad_miss += stats->ierrors.sad_miss; + ips->ierrors.not_processed += stats->ierrors.not_processed; + ips->ierrors.icv_check += stats->ierrors.icv_check; + ips->ierrors.ipsec_length += stats->ierrors.ipsec_length; + ips->ierrors.misc += stats->ierrors.misc; + } +} + static int iavf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n) { @@ -1636,6 +1685,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev, struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_vsi *vsi = &vf->vsi; struct virtchnl_eth_stats *pstats = NULL; + struct iavf_eth_xstats iavf_xtats = {0}; if (n < IAVF_NB_XSTATS) return IAVF_NB_XSTATS; @@ -1648,11 +1698,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev, return 0; iavf_update_stats(vsi, 
pstats); + iavf_xtats.eth_stats = *pstats; + + if (iavf_ipsec_crypto_supported(adapter)) + iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats); /* loop over xstats array and values from pstats */ for (i = 0; i < IAVF_NB_XSTATS; i++) { xstats[i].id = i; - xstats[i].value = *(uint64_t *)(((char *)pstats) + + xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) + rte_iavf_stats_strings[i].offset); } diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index 500ffb2d06..5e39d2bc96 100644 --- a/drivers/net/iavf/iavf_rxtx.h +++ b/drivers/net/iavf/iavf_rxtx.h @@ -165,18 +165,6 @@ struct iavf_txq_ops { void (*release_mbufs)(struct iavf_tx_queue *txq); }; -struct iavf_ipsec_crypto_stats { - uint64_t icount; - uint64_t ibytes; - struct { - uint64_t count; - uint64_t sad_miss; - uint64_t not_processed; - uint64_t icv_check; - uint64_t ipsec_length; - uint64_t misc; - } ierrors; -}; struct iavf_rx_queue_stats { uint64_t reserved; From patchwork Tue Oct 19 09:23:43 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Radu Nicolau X-Patchwork-Id: 102158 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 35A83A0C43; Tue, 19 Oct 2021 11:37:13 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2A1D941137; Tue, 19 Oct 2021 11:36:46 +0200 (CEST) Received: from mga05.intel.com (mga05.intel.com [192.55.52.43]) by mails.dpdk.org (Postfix) with ESMTP id 0063041125 for ; Tue, 19 Oct 2021 11:36:43 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10141"; a="314669774" X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="314669774" Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 19 Oct 2021 02:36:43 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="661739960" Received: from silpixa00400884.ir.intel.com ([10.243.22.82]) by orsmga005.jf.intel.com with ESMTP; 19 Oct 2021 02:36:41 -0700 From: Radu Nicolau To: Jingjing Wu , Beilei Xing Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau Date: Tue, 19 Oct 2021 10:23:43 +0100 Message-Id: <20211019092344.1299368-7-radu.nicolau@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com> References: <20210909142428.750634-1-radu.nicolau@intel.com> <20211019092344.1299368-1-radu.nicolau@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v10 6/7] net/iavf: add watchdog for VFLR X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add a watchdog to the iAVF PMD which supports monitoring the VFLR register. If the device is not already in reset and a VF reset in progress is detected, notify the user through a callback and enter the reset state (an application-side callback sketch is shown below). If the device is already in reset, poll for completion of the reset.
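For reference, a minimal sketch (not part of the patch) of the application-side handling referred to above: the watchdog raises RTE_ETH_EVENT_INTR_RESET, to which an application can subscribe with the standard ethdev event API. The recovery step suggested in the comment is an illustrative assumption:

#include <stdio.h>
#include <rte_ethdev.h>

/* Invoked when the PMD watchdog detects a VF reset; keep the handler
 * short and defer the heavy recovery work to a control thread.
 */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected by PMD watchdog\n",
			port_id);
	/* e.g. signal a control thread that stops the port and calls
	 * rte_eth_dev_reset(port_id)
	 */
	return 0;
}

static void
register_vf_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
			vf_reset_event_cb, NULL);
}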
Signed-off-by: Declan Doherty Signed-off-by: Radu Nicolau Acked-by: Jingjing Wu --- drivers/net/iavf/iavf.h | 5 ++ drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 90a7344bd5..f06979b4da 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -31,6 +31,8 @@ #define IAVF_NUM_MACADDR_MAX 64 +#define IAVF_DEV_WATCHDOG_PERIOD 0 + #define IAVF_DEFAULT_RX_PTHRESH 8 #define IAVF_DEFAULT_RX_HTHRESH 8 #define IAVF_DEFAULT_RX_WTHRESH 0 @@ -216,6 +218,9 @@ struct iavf_info { int cmd_retval; /* return value of the cmd response from PF */ uint8_t *aq_resp; /* buffer to store the adminq response from PF */ + /** iAVF watchdog enable */ + bool watchdog_enabled; + /* Event from pf */ bool dev_closed; bool link_up; diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 25476965ab..7221e342ad 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "iavf.h" #include "iavf_rxtx.h" @@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, return 0; } +__rte_unused +static int +iavf_vfr_inprogress(struct iavf_hw *hw) +{ + int inprogress = 0; + + if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) & + IAVF_VFGEN_RSTAT_VFR_STATE_MASK) == + VIRTCHNL_VFR_INPROGRESS) + inprogress = 1; + + if (inprogress) + PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress"); + + return inprogress; + } + +__rte_unused +static void +iavf_dev_watchdog(void *cb_arg) +{ + struct iavf_adapter *adapter = cb_arg; + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); + int vfr_inprogress = 0, rc = 0; + + /* check if watchdog has been disabled since last call */ + if (!adapter->vf.watchdog_enabled) + return; + + /* If in reset then poll vfr_inprogress register for completion */ + if (adapter->vf.vf_reset) { + vfr_inprogress = iavf_vfr_inprogress(hw); + + if (!vfr_inprogress) { + PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed", + adapter->vf.eth_dev->data->name); + adapter->vf.vf_reset = false; + } + /* If not in reset then poll vfr_inprogress register for VFLR event */ + } else { + vfr_inprogress = iavf_vfr_inprogress(hw); + + if (vfr_inprogress) { + PMD_DRV_LOG(INFO, + "VF \"%s\" reset event detected by watchdog", + adapter->vf.eth_dev->data->name); + + /* enter reset state with VFLR event */ + adapter->vf.vf_reset = true; + + rte_eth_dev_callback_process(adapter->vf.eth_dev, + RTE_ETH_EVENT_INTR_RESET, NULL); + } + } + + /* re-alarm watchdog */ + rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD, + &iavf_dev_watchdog, cb_arg); + + if (rc) + PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for VF \"%s\"", + adapter->vf.eth_dev->data->name); +} + +static void +iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused) +{ +#if (IAVF_DEV_WATCHDOG_PERIOD > 0) + PMD_DRV_LOG(INFO, "Enabling device watchdog"); + adapter->vf.watchdog_enabled = true; + if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD, + &iavf_dev_watchdog, (void *)adapter)) + PMD_DRV_LOG(ERR, "Failed to enable device watchdog"); +#endif +} + +static void +iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused) +{ +#if (IAVF_DEV_WATCHDOG_PERIOD > 0) + PMD_DRV_LOG(INFO, "Disabling device watchdog"); + adapter->vf.watchdog_enabled = false; +#endif +} + static int iavf_set_mc_addr_list(struct
rte_eth_dev *dev, struct rte_ether_addr *mc_addrs, @@ -2481,6 +2567,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) iavf_default_rss_disable(adapter); + + /* Start device watchdog */ + iavf_dev_watchdog_enable(adapter); + + return 0; flow_init_err: @@ -2564,6 +2655,9 @@ iavf_dev_close(struct rte_eth_dev *dev) if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) vf->vf_reset = false; + /* disable watchdog */ + iavf_dev_watchdog_disable(adapter); + return ret; } From patchwork Tue Oct 19 09:23:44 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Radu Nicolau X-Patchwork-Id: 102159 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 90DC6A0C43; Tue, 19 Oct 2021 11:37:18 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 41A9D4113D; Tue, 19 Oct 2021 11:36:48 +0200 (CEST) Received: from mga05.intel.com (mga05.intel.com [192.55.52.43]) by mails.dpdk.org (Postfix) with ESMTP id 5CB474113D for ; Tue, 19 Oct 2021 11:36:46 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10141"; a="314669778" X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="314669778" Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 19 Oct 2021 02:36:45 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.85,384,1624345200"; d="scan'208";a="661740016" Received: from silpixa00400884.ir.intel.com ([10.243.22.82]) by orsmga005.jf.intel.com with ESMTP; 19 Oct 2021 02:36:43 -0700 From: Radu Nicolau To: Jingjing Wu , Beilei Xing , Haiyue Wang Cc: dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com, qi.z.zhang@intel.com, bruce.richardson@intel.com, konstantin.ananyev@intel.com, Radu Nicolau Date: Tue, 19 Oct 2021 10:23:44 +0100 Message-Id: <20211019092344.1299368-8-radu.nicolau@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20211019092344.1299368-1-radu.nicolau@intel.com> References: <20210909142428.750634-1-radu.nicolau@intel.com> <20211019092344.1299368-1-radu.nicolau@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v10 7/7] net/iavf: update doc with inline crypto support X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Update the PMD doc, feature matrix and release notes with the new inline crypto feature. 
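For context on what the inline crypto feature enables from the application side, a rough sketch (not part of this patch) of creating an inline crypto IPsec session through the rte_security API on the port's security context; the SPI, SA direction/mode, mempools and crypto transform are placeholder assumptions, and the ipsec-secgw sample remains the reference usage:

#include <rte_ethdev.h>
#include <rte_security.h>

/* Sketch: create an inline crypto IPsec session on a port's security
 * context. SPI, direction, mode and the symmetric crypto transform are
 * illustrative only. */
static struct rte_security_session *
create_inline_ipsec_session(uint16_t port_id, struct rte_mempool *sess_mp,
		struct rte_mempool *priv_mp, struct rte_crypto_sym_xform *xform)
{
	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 1000,	/* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
		},
		.crypto_xform = xform,
	};

	if (sec_ctx == NULL)
		return NULL;
	return rte_security_session_create(sec_ctx, &conf, sess_mp, priv_mp);
}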
Signed-off-by: Radu Nicolau --- doc/guides/nics/features/iavf.ini | 2 ++ doc/guides/nics/intel_vf.rst | 10 ++++++++++ doc/guides/rel_notes/release_21_11.rst | 1 + 3 files changed, 13 insertions(+) diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini index d00ca934c3..78f649c25f 100644 --- a/doc/guides/nics/features/iavf.ini +++ b/doc/guides/nics/features/iavf.ini @@ -28,6 +28,7 @@ L4 checksum offload = P Packet type parsing = Y Rx descriptor status = Y Tx descriptor status = Y +Inline crypto = Y Basic stats = Y Multiprocess aware = Y FreeBSD = Y @@ -64,3 +65,4 @@ mark = Y passthru = Y queue = Y rss = Y +security = Y diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst index 2efdd1a41b..038e7c02b6 100644 --- a/doc/guides/nics/intel_vf.rst +++ b/doc/guides/nics/intel_vf.rst @@ -633,3 +633,13 @@ Windows Support * To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository `_. + + +Inline IPsec Support +-------------------- + +* IAVF PMD supports inline crypto processing depending on the underlying + hardware crypto capabilities. IPsec Security Gateway Sample Application + supports inline IPsec processing for IAVF PMD. For more details see the + IPsec Security Gateway Sample Application and Security library + documentation. diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst index bd6a388c9d..9f258d481d 100644 --- a/doc/guides/rel_notes/release_21_11.rst +++ b/doc/guides/rel_notes/release_21_11.rst @@ -112,6 +112,7 @@ New Features * Added Intel iavf support on Windows. * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow. + * Added Intel iavf inline crypto support. * **Updated Intel ice driver.**
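As a closing illustration for the series, the new inline_ipsec_crypto_* counters introduced by the xstats patch are reported through the standard ethdev xstats API; a minimal sketch, with the port id and error handling kept deliberately simple:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: dump all extended statistics of a port; on an iavf port with
 * this series applied, the list includes the inline_ipsec_crypto_*
 * counters. */
static void
dump_port_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get(port_id, NULL, 0);	/* query count */

	if (n <= 0)
		return;

	struct rte_eth_xstat xstats[n];
	struct rte_eth_xstat_name names[n];

	if (rte_eth_xstats_get(port_id, xstats, n) != n ||
			rte_eth_xstats_get_names(port_id, names, n) != n)
		return;

	for (int i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
}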