On 10/24/22 16:12, Junfeng Guo wrote:
> Add dev ops dev_supported_ptypes_get.
>
> Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Again, this patch should come after the datapath implementation to
make it reviewable.
[snip]
> diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
> index 30e1c0831e..a03068df85 100644
> --- a/doc/guides/nics/features/idpf.ini
> +++ b/doc/guides/nics/features/idpf.ini
> @@ -8,4 +8,5 @@
> ;
> [Features]
> Queue start/stop = Y
> +Packet type parsing = Y
Advertising this feature is false without the datapath implementation.
> Linux = Y
[snip]
> diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
> index 298eaf0a1a..c3c4acb69f 100644
> --- a/drivers/net/idpf/idpf_rxtx.c
> +++ b/drivers/net/idpf/idpf_rxtx.c
> @@ -8,6 +8,25 @@
> #include "idpf_ethdev.h"
> #include "idpf_rxtx.h"
>
> +const uint32_t *
> +idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
> +{
> + static const uint32_t ptypes[] = {
> + RTE_PTYPE_L2_ETHER,
> + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
> + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
> + RTE_PTYPE_L4_FRAG,
> + RTE_PTYPE_L4_NONFRAG,
> + RTE_PTYPE_L4_UDP,
> + RTE_PTYPE_L4_TCP,
> + RTE_PTYPE_L4_SCTP,
> + RTE_PTYPE_L4_ICMP,
> + RTE_PTYPE_UNKNOWN
> + };
It looks like the above array must not be static. It should be
built dynamically based on idpf_get_pkt_type().
> +
> + return ptypes;
> +}
> +
> static inline int
> check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
> {
[snip]
@@ -8,4 +8,5 @@
;
[Features]
Queue start/stop = Y
+Packet type parsing = Y
Linux = Y
@@ -57,6 +57,7 @@ idpf_dev_link_update(struct rte_eth_dev *dev,
}
static const struct eth_dev_ops idpf_eth_dev_ops = {
+ .dev_supported_ptypes_get = idpf_dev_supported_ptypes_get,
.dev_configure = idpf_dev_configure,
.dev_start = idpf_dev_start,
.dev_stop = idpf_dev_stop,
@@ -641,6 +642,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
goto err_api;
}
+ ret = idpf_get_pkt_type(adapter);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to set ptype table");
+ goto err_api;
+ }
+
adapter->caps = rte_zmalloc("idpf_caps",
sizeof(struct virtchnl2_get_capabilities), 0);
if (adapter->caps == NULL) {
@@ -47,6 +47,8 @@
#define IDPF_NUM_MACADDR_MAX 64
+#define IDPF_MAX_PKT_TYPE 1024
+
#define IDPF_VLAN_TAG_SIZE 4
#define IDPF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
@@ -141,6 +143,8 @@ struct idpf_adapter {
uint32_t max_rxq_per_msg;
uint32_t max_txq_per_msg;
+ uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
+
bool stopped;
};
@@ -202,6 +206,7 @@ int idpf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct idpf_adapter *adapter);
int idpf_vc_destroy_vport(struct idpf_vport *vport);
@@ -213,6 +218,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on);
int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
uint16_t buf_len, uint8_t *buf);
@@ -8,6 +8,25 @@
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
static inline int
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
{
@@ -21,6 +21,10 @@
#define IDPF_DEFAULT_TX_RS_THRESH 32
#define IDPF_DEFAULT_TX_FREE_THRESH 32
+#define IDPF_GET_PTYPE_SIZE(p) \
+ (sizeof(struct virtchnl2_ptype) + \
+ (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
struct idpf_rx_queue {
struct idpf_adapter *adapter; /* the adapter this queue belongs to */
struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
@@ -137,4 +141,7 @@ int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void idpf_stop_queues(struct rte_eth_dev *dev);
+
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
#endif /* _IDPF_RXTX_H_ */
@@ -238,6 +238,11 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
clear_cmd(adapter);
break;
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ /* for multiple response message,
+ * do not handle the response here.
+ */
+ break;
default:
/* For other virtchnl ops in running time,
* wait for the cmd done flag.
@@ -286,6 +291,215 @@ idpf_vc_check_api_version(struct idpf_adapter *adapter)
return err;
}
+int __rte_cold
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_ptype_info *ptype_info;
+ uint16_t ptype_recvd = 0, ptype_offset, i, j;
+ int ret;
+
+ ret = idpf_vc_query_ptype_info(adapter);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Fail to query packet type information");
+ return ret;
+ }
+
+ ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+ if (ptype_info == NULL)
+ return -ENOMEM;
+
+ while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+ ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Fail to get packet type information");
+ goto free_ptype_info;
+ }
+
+ ptype_recvd += ptype_info->num_ptypes;
+ ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+ sizeof(struct virtchnl2_ptype);
+
+ for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+ bool is_inner = false, is_ip = false;
+ struct virtchnl2_ptype *ptype;
+ uint32_t proto_hdr = 0;
+
+ ptype = (struct virtchnl2_ptype *)
+ ((u8 *)ptype_info + ptype_offset);
+ ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+ if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+ ret = -EINVAL;
+ goto free_ptype_info;
+ }
+
+ if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
+ goto free_ptype_info;
+
+ for (j = 0; j < ptype->proto_id_count; j++) {
+ switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ proto_hdr &= ~RTE_PTYPE_L4_MASK;
+ proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+ is_inner = true;
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ if (is_inner) {
+ proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+ proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+ } else {
+ proto_hdr &= ~RTE_PTYPE_L2_MASK;
+ proto_hdr |= RTE_PTYPE_L2_ETHER;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_VLAN:
+ if (is_inner) {
+ proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+ proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ proto_hdr &= ~RTE_PTYPE_L2_MASK;
+ proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+ break;
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ proto_hdr &= ~RTE_PTYPE_L2_MASK;
+ proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ proto_hdr &= ~RTE_PTYPE_L2_MASK;
+ proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ proto_hdr &= ~RTE_PTYPE_L2_MASK;
+ proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ if (!is_ip) {
+ proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ is_ip = true;
+ } else {
+ proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP;
+ is_inner = true;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ if (!is_ip) {
+ proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ is_ip = true;
+ } else {
+ proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP;
+ is_inner = true;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+ else
+ proto_hdr |= RTE_PTYPE_L4_FRAG;
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+ else
+ proto_hdr |= RTE_PTYPE_L4_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+ else
+ proto_hdr |= RTE_PTYPE_L4_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+ else
+ proto_hdr |= RTE_PTYPE_L4_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+ else
+ proto_hdr |= RTE_PTYPE_L4_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ if (is_inner)
+ proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+ else
+ proto_hdr |= RTE_PTYPE_L4_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ is_inner = true;
+ proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ is_inner = true;
+ proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ is_inner = true;
+ proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+ break;
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ is_inner = true;
+ proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ default:
+ continue;
+ }
+ adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+ }
+ }
+ }
+
+free_ptype_info:
+ rte_free(ptype_info);
+ clear_cmd(adapter);
+ return ret;
+}
+
int
idpf_vc_get_caps(struct idpf_adapter *adapter)
{
@@ -984,3 +1198,29 @@ idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
return err;
}
+
+int
+idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_cmd_info args;
+ int len, err;
+
+ len = sizeof(struct virtchnl2_get_ptype_info);
+ ptype_info = rte_zmalloc("ptype_info", len, 0);
+ if (ptype_info == NULL)
+ return -ENOMEM;
+
+ ptype_info->start_ptype_id = 0;
+ ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
+ args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ args.in_args = (u8 *)ptype_info;
+ args.in_args_size = len;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err != 0)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+ rte_free(ptype_info);
+ return err;
+}