@@ -981,6 +981,12 @@ F: drivers/net/sfc/
F: doc/guides/nics/sfc_efx.rst
F: doc/guides/nics/features/sfc.ini
+Linkdata sxe
+M: Jie Li <lijie@linkdatatechnology.com>
+F: drivers/net/sxe/
+F: doc/guides/nics/sxe.rst
+F: doc/guides/nics/features/sxe*.ini
+
Wangxun ngbe
M: Jiawen Wu <jiawenwu@trustnetic.com>
F: drivers/net/ngbe/
@@ -72,6 +72,9 @@ endif
if dpdk_conf.has('RTE_NET_DPAA')
deps += ['bus_dpaa', 'mempool_dpaa', 'net_dpaa']
endif
+if dpdk_conf.has('RTE_NET_SXE')
+ deps += 'net_sxe'
+endif
# Driver-specific commands are located in driver directories.
includes = include_directories('.')
new file mode 100644
@@ -0,0 +1,82 @@
+;
+; Supported features of the 'sxe' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link speed configuration = Y
+Link status = Y
+Link status event = Y
+Rx interrupt = Y
+Queue start/stop = Y
+Power mgmt address monitor = Y
+MTU update = Y
+Scattered Rx = Y
+LRO = Y
+TSO = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+Multicast MAC filter = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+VMDq = Y
+SR-IOV = Y
+DCB = Y
+VLAN filter = Y
+Flow control = Y
+Rate limitation = Y
+Traffic manager = Y
+Inline crypto = Y
+CRC offload = P
+VLAN offload = P
+QinQ offload = P
+L3 checksum offload = P
+L4 checksum offload = P
+MACsec offload = P
+Inner L3 checksum = P
+Inner L4 checksum = P
+Packet type parsing = Y
+Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats = Y
+Extended stats = Y
+Stats per queue = Y
+FW version = Y
+EEPROM dump = Y
+Module EEPROM dump = Y
+Registers dump = Y
+Multiprocess aware = Y
+FreeBSD = Y
+Linux = Y
+ARMv8 = Y
+LoongArch64 = Y
+rv64 = Y
+x86-32 = Y
+x86-64 = Y
+
+[rte_flow items]
+eth = P
+e_tag = Y
+fuzzy = Y
+ipv4 = Y
+ipv6 = Y
+nvgre = Y
+raw = Y
+sctp = Y
+tcp = Y
+udp = Y
+vlan = P
+vxlan = Y
+
+[rte_flow actions]
+drop = Y
+mark = Y
+pf = Y
+queue = Y
+rss = Y
+security = Y
+vf = Y
new file mode 100644
@@ -0,0 +1,39 @@
+;
+; Supported features of the 'sxe_vf' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Rx interrupt = Y
+Power mgmt address monitor = Y
+MTU update = Y
+Scattered Rx = Y
+LRO = Y
+TSO = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+VLAN filter = Y
+Inline crypto = Y
+CRC offload = P
+VLAN offload = P
+QinQ offload = P
+L3 checksum offload = P
+L4 checksum offload = P
+Inner L3 checksum = P
+Inner L4 checksum = P
+Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats = Y
+Extended stats = Y
+Registers dump = Y
+FreeBSD = Y
+Linux = Y
+ARMv8 = Y
+x86-32 = Y
+x86-64 = Y
@@ -69,3 +69,4 @@ Network Interface Controller Drivers
vhost
virtio
vmxnet3
+ sxe
new file mode 100644
@@ -0,0 +1,71 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright (C), 2022, Linkdata Technology Co., Ltd.
+
+SXE Poll Mode Driver
+======================
+
+The SXE PMD (librte_pmd_sxe) provides poll mode driver support
+for Linkdata 1160-2X 10GE Ethernet Adapter.
+
+Features
+--------
+- PXE boot
+- PTP (Precision Time Protocol)
+- VMDq (Virtual Machine Device Queues)
+- SR-IOV, max 2 PFs, 63 VFs per PF
+- 128 L2 Ethernet MAC Address Filters (unicast and multicast)
+- 64 L2 VLAN filters
+- PLDM over MCTP over SMBus
+- 802.1q VLAN
+- Low Latency Interrupts
+- LRO
+- Promiscuous mode
+- Multicast mode
+- Multiple queues for TX and RX
+- Receiver Side Scaling (RSS)
+- MAC/VLAN filtering
+- Packet type information
+- Checksum offload
+- VLAN/QinQ stripping and inserting
+- TSO offload
+- Port hardware statistics
+- Link state information
+- Link flow control
+- Interrupt mode for RX
+- Scattered and gather for TX and RX
+- DCB
+- IEEE 1588
+- FW version
+- Generic flow API
+
+Configuration
+-------------
+
+Dynamic Logging Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One may leverage EAL option "--log-level" to change default levels
+for the log types supported by the driver. The option is used with
+an argument typically consisting of two parts separated by a colon.
+
+SXE PMD provides the following log types available for control:
+
+- ``pmd.net.sxe.drv`` (default level is **DEBUG**)
+
+ Affects driver-wide messages unrelated to any particular devices.
+
+- ``pmd.net.sxe.init`` (default level is **DEBUG**)
+
+ Extra logging of the messages during PMD initialization.
+
+- ``pmd.net.sxe.rx`` (default level is **DEBUG**)
+
+ Affects rx-wide messages.
+- ``pmd.net.sxe.tx`` (default level is **DEBUG**)
+
+ Affects tx-wide messages.
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
@@ -62,6 +62,7 @@ drivers = [
'vhost',
'virtio',
'vmxnet3',
+ 'sxe',
]
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
new file mode 100644
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sxe.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -DSXE_DPDK
+CFLAGS += -DSXE_HOST_DRIVER
+CFLAGS += -DSXE_DPDK_L4_FEATURES
+CFLAGS += -DSXE_DPDK_SRIOV
+CFLAGS += -DSXE_DPDK_FILTER_CTRL
+CFLAGS += -DSXE_DPDK_MACSEC
+CFLAGS += -DSXE_DPDK_TM
+CFLAGS += -DSXE_DPDK_SIMD
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_sxe_version.map
+
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wmissing-prototypes
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lpthread
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+
+$(shell cp $(SRCDIR)/pf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/vf/* $(SRCDIR))
+$(shell cp $(SRCDIR)/base/* $(SRCDIR))
+$(shell cp $(SRCDIR)/include/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/*.h $(SRCDIR))
+$(shell cp $(SRCDIR)/include/sxe/mgl/*.h $(SRCDIR))
+$(warning "file copy done")
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_testpmd.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_hw.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_flow_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_offload.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_pmd_hdc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_ptp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vf.c
+
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_vec_sse.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_filter_ctrl.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_fnav.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_macsec.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxe_tm.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_msg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_queue.c
+SRCS-$(CONFIG_RTE_LIBRTE_SXE_PMD) += sxevf_offload.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include := rte_pmd_sxe.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_SXE_PMD)-include += sxe_dcb.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
new file mode 100644
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "sxe_types.h"
+#include "sxe_common.h"
+
+#define SXE_TRACE_ID_COUNT_MASK 0x00000000000000FFLLU
+#define SXE_TRACE_ID_TID_MASK 0x0000000000FFFF00LLU
+#define SXE_TRACE_ID_TIME_MASK 0x00FFFFFFFF000000LLU
+#define SXE_TRACE_ID_FLAG 0xFF00000000000000LLU
+
+#define SXE_TRACE_ID_COUNT_SHIFT 0
+#define SXE_TRACE_ID_TID_SHIFT 8
+#define SXE_TRACE_ID_TIME_SHIFT 24
+
+#define SXE_SEC_TO_MS(sec) ((sec) * 1000ULL)
+#define SXE_SEC_TO_NS(sec) ((sec) * 1000000000ULL)
+
+#define SXE_USEC_PER_MS 1000
+
+u64 sxe_trace_id;
+
+u64 sxe_time_get_real_ms(void)
+{
+ u64 ms = 0;
+ struct timeval tv = { 0 };
+ s32 ret = gettimeofday(&tv, NULL);
+ if (ret < 0)
+ goto l_end;
+
+ ms = SXE_SEC_TO_MS(tv.tv_sec) + tv.tv_usec / SXE_USEC_PER_MS;
+
+l_end:
+ return ms;
+}
+
+u64 sxe_trace_id_gen(void)
+{
+ u64 tid = getpid() + (pthread_self() << 20);
+ u64 index = 0;
+ u64 timestamp = sxe_time_get_real_ms();
+
+ sxe_trace_id = (SXE_TRACE_ID_FLAG)
+ | ((timestamp << SXE_TRACE_ID_TIME_SHIFT) & SXE_TRACE_ID_TIME_MASK)
+ | ((tid << SXE_TRACE_ID_TID_SHIFT) & SXE_TRACE_ID_TID_MASK)
+ | ((index << SXE_TRACE_ID_COUNT_SHIFT) & SXE_TRACE_ID_COUNT_MASK);
+ return sxe_trace_id;
+}
+
+void sxe_trace_id_clean(void)
+{
+ sxe_trace_id = 0;
+}
+
+u64 sxe_trace_id_get(void)
+{
+ return sxe_trace_id++;
+}
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_COMMON_H__
+#define __SXE_DPDK_COMMON_H__
+
+u64 sxe_trace_id_gen(void);
+
+void sxe_trace_id_clean(void);
+
+u64 sxe_trace_id_get(void);
+
+u64 sxe_time_get_real_ms(void);
+
+#endif
new file mode 100644
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_PLATFORM_H__
+#define __SXE_COMPAT_PLATFORM_H__
+
+#include <rte_cycles.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_common.h>
+
+#include "sxe_types.h"
+
+#define false 0
+#define true 1
+
+#ifdef SXE_TEST
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+
+#define __iomem
+#define __force
+
+#define min(a, b) RTE_MIN(a, b)
+
+#ifdef __has_attribute
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0)
+#endif
+#else
+# define fallthrough do {} while (0)
+#endif
+
+#define __swab32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+#define __swab16(_value) \
+ (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#ifndef ntohs
+#define ntohs(o) be16_to_cpu(o)
+#endif
+
+#ifndef ntohl
+#define ntohl(o) be32_to_cpu(o)
+#endif
+
+#ifndef htons
+#define htons(o) cpu_to_be16(o)
+#endif
+
+#ifndef htonl
+#define htonl(o) cpu_to_be32(o)
+#endif
+#define mdelay rte_delay_ms
+#define udelay rte_delay_us
+#define usleep_range(min, max) rte_delay_us(min)
+#define msleep(x) rte_delay_us((x) * 1000)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define BIT(x) (1UL << (x))
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#define NSEC_PER_SEC 1000000000L
+
+#define ETH_P_1588 0x88F7
+
+#define VLAN_PRIO_SHIFT 13
+
+static inline void
+set_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m |= 1U << (nr & 31);
+}
+
+static inline int
+test_bit(int nr, const void *addr)
+{
+ return (1UL & (((const int *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
+}
+
+static inline void
+clear_bit(unsigned long nr, void *addr)
+{
+	int *m = ((int *)addr) + (nr >> 5);
+	*m &= ~(1U << (nr & 31));
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, void *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x1f);
+	int *m = ((int *)addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+ return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+ rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+static inline u32 sxe_read_addr(const volatile void *addr)
+{
+ return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static inline void sxe_write_addr(u32 value, volatile void *addr)
+{
+ rte_write32((rte_cpu_to_le_32(value)), addr);
+}
+
+#endif
new file mode 100644
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_COMPAT_VERSION_H__
+#define __SXE_COMPAT_VERSION_H__
+
+#include <stdbool.h>
+#include "sxe_dpdk_version.h"
+
+struct rte_eth_dev;
+enum rte_eth_event_type;
+
+int sxe_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event, void *ret_param);
+
+#ifdef DPDK_19_11_6
+#define ETH_DEV_OPS_HAS_DESC_RELATE
+
+#define __rte_cold __attribute__((cold))
+
+#define ETH_SPEED_NUM_UNKNOWN UINT32_MAX
+#ifdef RTE_ARCH_ARM64
+#define RTE_ARCH_ARM
+#endif
+
+#else
+
+#define SET_AUTOFILL_QUEUE_XSTATS
+#define PCI_REG_WC_WRITE
+
+#endif
+
+#ifndef PCI_REG_WC_WRITE
+#define rte_write32_wc rte_write32
+#define rte_write32_wc_relaxed rte_write32_relaxed
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define RTE_ETH_RSS_IPV4 ETH_RSS_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX
+
+
+#define RTE_ETH_VLAN_TYPE_UNKNOWN ETH_VLAN_TYPE_UNKNOWN
+#define RTE_ETH_VLAN_TYPE_INNER ETH_VLAN_TYPE_INNER
+#define RTE_ETH_VLAN_TYPE_OUTER ETH_VLAN_TYPE_OUTER
+#define RTE_ETH_VLAN_TYPE_MAX ETH_VLAN_TYPE_MAX
+
+
+#define RTE_ETH_8_POOLS ETH_8_POOLS
+#define RTE_ETH_16_POOLS ETH_16_POOLS
+#define RTE_ETH_32_POOLS ETH_32_POOLS
+#define RTE_ETH_64_POOLS ETH_64_POOLS
+
+
+#define RTE_ETH_4_TCS ETH_4_TCS
+#define RTE_ETH_8_TCS ETH_8_TCS
+
+
+#define RTE_ETH_MQ_RX_NONE ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_VMDQ_DCB_RSS ETH_MQ_RX_VMDQ_DCB_RSS
+
+
+#define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY ETH_MQ_TX_VMDQ_ONLY
+
+
+#define RTE_ETH_FC_NONE RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL RTE_FC_FULL
+
+
+#define RTE_ETH_MQ_RX_RSS_FLAG ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG ETH_MQ_RX_VMDQ_FLAG
+
+
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC DEV_RX_OFFLOAD_KEEP_CRC
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH DEV_RX_OFFLOAD_RSS_HASH
+
+
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO DEV_TX_OFFLOAD_GENEVE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT DEV_TX_OFFLOAD_MACSEC_INSERT
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE DEV_TX_OFFLOAD_MT_LOCKFREE
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE DEV_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY DEV_TX_OFFLOAD_SECURITY
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO DEV_TX_OFFLOAD_IP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
+
+
+#define RTE_ETH_LINK_SPEED_AUTONEG ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_1G ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_10G ETH_LINK_SPEED_10G
+
+#define RTE_ETH_SPEED_NUM_NONE ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_1G ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_10G ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_UNKNOWN ETH_SPEED_NUM_UNKNOWN
+
+
+#define RTE_ETH_LINK_HALF_DUPLEX ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP ETH_LINK_UP
+
+
+#define RTE_ETH_RSS_RETA_SIZE_128 ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RETA_GROUP_SIZE RTE_RETA_GROUP_SIZE
+
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES ETH_DCB_NUM_QUEUES
+
+#define RTE_ETH_DCB_PFC_SUPPORT ETH_DCB_PFC_SUPPORT
+
+
+#define RTE_ETH_VLAN_STRIP_OFFLOAD ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_OFFLOAD ETH_QINQ_STRIP_OFFLOAD
+
+#define RTE_ETH_VLAN_STRIP_MASK ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_QINQ_STRIP_MASK ETH_QINQ_STRIP_MASK
+#define RTE_ETH_VLAN_ID_MAX ETH_VLAN_ID_MAX
+
+
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR ETH_NUM_RECEIVE_MAC_ADDR
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
+
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST ETH_VMDQ_ACCEPT_MULTICAST
+
+#define RTE_VLAN_HLEN 4
+
+#define RTE_MBUF_F_RX_VLAN PKT_RX_VLAN
+#define RTE_MBUF_F_RX_RSS_HASH PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD PKT_RX_EIP_CKSUM_BAD
+#define RTE_MBUF_F_RX_VLAN_STRIPPED PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_IEEE1588_PTP PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_RX_QINQ_STRIPPED PKT_RX_QINQ_STRIPPED
+#define RTE_MBUF_F_RX_LRO PKT_RX_LRO
+#define RTE_MBUF_F_RX_SEC_OFFLOAD PKT_RX_SEC_OFFLOAD
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED PKT_RX_SEC_OFFLOAD_FAILED
+#define RTE_MBUF_F_RX_QINQ PKT_RX_QINQ
+
+#define RTE_MBUF_F_TX_SEC_OFFLOAD PKT_TX_SEC_OFFLOAD
+#define RTE_MBUF_F_TX_MACSEC PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_QINQ PKT_TX_QINQ
+#define RTE_MBUF_F_TX_TCP_SEG PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4 PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6 PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN PKT_TX_VLAN
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4 PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6 PKT_TX_OUTER_IPV6
+
+#define RTE_MBUF_F_TX_OFFLOAD_MASK PKT_TX_OFFLOAD_MASK
+
+#define RTE_ETH_8_POOLS ETH_8_POOLS
+#define RTE_ETH_16_POOLS ETH_16_POOLS
+#define RTE_ETH_32_POOLS ETH_32_POOLS
+#define RTE_ETH_64_POOLS ETH_64_POOLS
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_ETHDEV_DEBUG_RX
+#define RTE_ETHDEV_DEBUG_TX
+#endif
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define rte_eth_fdir_pballoc_type rte_fdir_pballoc_type
+#define rte_eth_fdir_conf rte_fdir_conf
+
+#define RTE_ETH_FDIR_PBALLOC_64K RTE_FDIR_PBALLOC_64K
+#define RTE_ETH_FDIR_PBALLOC_128K RTE_FDIR_PBALLOC_128K
+#define RTE_ETH_FDIR_PBALLOC_256K RTE_FDIR_PBALLOC_256K
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+ (&((pci_dev)->intr_handle))
+
+#define SXE_DEV_FNAV_CONF(dev) \
+ (&((dev)->data->dev_conf.fdir_conf))
+#define SXE_GET_FRAME_SIZE(dev) \
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len)
+
+#elif defined DPDK_21_11_5
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+ ((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+ (&((dev)->data->dev_conf.fdir_conf))
+#define SXE_GET_FRAME_SIZE(dev) \
+ (dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#else
+#define SXE_PCI_INTR_HANDLE(pci_dev) \
+ ((pci_dev)->intr_handle)
+#define SXE_DEV_FNAV_CONF(dev) \
+ (&((struct sxe_adapter *)(dev)->data->dev_private)->fnav_conf)
+#define RTE_ADAPTER_HAVE_FNAV_CONF
+#define SXE_GET_FRAME_SIZE(dev) \
+ (dev->data->mtu + SXE_ETH_OVERHEAD)
+
+#endif
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#define ETH_DEV_OPS_FILTER_CTRL
+#define DEV_RX_JUMBO_FRAME
+#define ETH_DEV_MIRROR_RULE
+#define ETH_DEV_RX_DESC_DONE
+#else
+#define ETH_DEV_OPS_MONITOR
+#endif
+
+#ifdef DPDK_22_11_3
+#define DEV_RX_OFFLOAD_CHECKSUM RTE_ETH_RX_OFFLOAD_CHECKSUM
+#endif
+
+#ifdef DPDK_22_11_3
+#define ETH_DCB_NUM_USER_PRIORITIES RTE_ETH_DCB_NUM_USER_PRIORITIES
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DPDK_VERSION_H__
+#define __SXE_DPDK_VERSION_H__
+
+#include <rte_version.h>
+
+#if (RTE_VERSION >= RTE_VERSION_NUM(19, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(19, 12, 0, 0))
+ #define DPDK_19_11_6
+#elif (RTE_VERSION >= RTE_VERSION_NUM(20, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(20, 12, 0, 0))
+ #define DPDK_20_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(21, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(21, 12, 0, 0))
+ #define DPDK_21_11_5
+#elif (RTE_VERSION >= RTE_VERSION_NUM(22, 0, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(22, 12, 0, 0))
+ #define DPDK_22_11_3
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ERRNO_H__
+#define __SXE_ERRNO_H__
+
+#define SXE_ERR_MODULE_STANDARD 0
+#define SXE_ERR_MODULE_PF 1
+#define SXE_ERR_MODULE_VF 2
+#define SXE_ERR_MODULE_HDC 3
+
+#define SXE_ERR_MODULE_OFFSET 16
+#define SXE_ERR_MODULE(module, errcode) \
+	(((module) << SXE_ERR_MODULE_OFFSET) | (errcode))
+#define SXE_ERR_PF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode)
+#define SXE_ERR_VF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode)
+#define SXE_ERR_HDC(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode)
+
+#define SXE_ERR_CONFIG EINVAL
+#define SXE_ERR_PARAM EINVAL
+#define SXE_ERR_RESET_FAILED EPERM
+#define SXE_ERR_NO_SPACE ENOSPC
+#define SXE_ERR_FNAV_CMD_INCOMPLETE EBUSY
+#define SXE_ERR_MBX_LOCK_FAIL EBUSY
+#define SXE_ERR_OPRATION_NOT_PERM EPERM
+#define SXE_ERR_LINK_STATUS_INVALID EINVAL
+#define SXE_ERR_LINK_SPEED_INVALID EINVAL
+#define SXE_ERR_DEVICE_NOT_SUPPORTED EOPNOTSUPP
+#define SXE_ERR_HDC_LOCK_BUSY EBUSY
+#define SXE_ERR_HDC_FW_OV_TIMEOUT ETIMEDOUT
+#define SXE_ERR_MDIO_CMD_TIMEOUT ETIMEDOUT
+#define SXE_ERR_INVALID_LINK_SETTINGS EINVAL
+#define SXE_ERR_FNAV_REINIT_FAILED EIO
+#define SXE_ERR_CLI_FAILED EIO
+#define SXE_ERR_MASTER_REQUESTS_PENDING SXE_ERR_PF(1)
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT SXE_ERR_PF(2)
+#define SXE_ERR_ENABLE_SRIOV_FAIL SXE_ERR_PF(3)
+#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_PF(4)
+#define SXE_ERR_SFP_NOT_PERSENT SXE_ERR_PF(5)
+#define SXE_ERR_PHY_NOT_PERSENT SXE_ERR_PF(6)
+#define SXE_ERR_PHY_RESET_FAIL SXE_ERR_PF(7)
+#define SXE_ERR_FC_NOT_NEGOTIATED SXE_ERR_PF(8)
+#define SXE_ERR_SFF_NOT_SUPPORTED SXE_ERR_PF(9)
+
+#define SXEVF_ERR_MAC_ADDR_INVALID EINVAL
+#define SXEVF_ERR_RESET_FAILED EIO
+#define SXEVF_ERR_ARGUMENT_INVALID EINVAL
+#define SXEVF_ERR_NOT_READY EBUSY
+#define SXEVF_ERR_POLL_ACK_FAIL EIO
+#define SXEVF_ERR_POLL_MSG_FAIL EIO
+#define SXEVF_ERR_MBX_LOCK_FAIL EBUSY
+#define SXEVF_ERR_REPLY_INVALID EINVAL
+#define SXEVF_ERR_IRQ_NUM_INVALID EINVAL
+#define SXEVF_ERR_PARAM EINVAL
+#define SXEVF_ERR_MAILBOX_FAIL SXE_ERR_VF(1)
+#define SXEVF_ERR_MSG_HANDLE_ERR SXE_ERR_VF(2)
+#define SXEVF_ERR_DEVICE_NOT_SUPPORTED SXE_ERR_VF(3)
+#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_VF(4)
+
+#endif
new file mode 100644
@@ -0,0 +1,6286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifdef SXE_PHY_CONFIGURE
+#include <linux/mdio.h>
+#endif
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#include "sxe_pci.h"
+#include "sxe_log.h"
+#include "sxe_debug.h"
+#include "sxe_host_hdc.h"
+#include "sxe_sriov.h"
+#include "sxe_compat.h"
+#else
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+
+#include "sxe_hw.h"
+#endif
+
+
+/* Mailbox message field masks — presumably PF message type bits, full
+ * message id, and control-message bits; confirm against the sxe mailbox
+ * protocol definitions.
+ */
+#define SXE_PFMSG_MASK (0xFF00)
+
+#define SXE_MSGID_MASK (0xFFFFFFFF)
+
+#define SXE_CTRL_MSG_MASK (0x700)
+
+/* Poll counts, buffer sizes and field masks used by this file. */
+#define SXE_RING_WAIT_LOOP 10
+#define SXE_REG_NAME_LEN 16
+#define SXE_DUMP_REG_STRING_LEN 73
+#define SXE_DUMP_REGS_NUM 64
+#define SXE_MAX_RX_DESC_POLL 10
+#define SXE_LPBK_EN 0x00000001
+#define SXE_MACADDR_LOW_4_BYTE 4
+#define SXE_MACADDR_HIGH_2_BYTE 2
+#define SXE_RSS_FIELD_MASK 0xffff0000
+#define SXE_MRQE_MASK 0x0000000f
+
+#define SXE_HDC_DATA_LEN_MAX 256
+
+#define SXE_8_TC_MSB (0x11111111)
+
+/* Low-level MMIO accessors; implementation differs per build target
+ * (kernel vs. DPDK), see definitions below.
+ */
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg);
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value);
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value);
+
+/* Array-style register access: offset is a 32-bit word index. */
+#define SXE_WRITE_REG_ARRAY_32(a, reg, offset, value) \
+	sxe_write_reg(a, reg + (offset << 2), value)
+#define SXE_READ_REG_ARRAY_32(a, reg, offset) \
+	sxe_read_reg(a, reg + (offset << 2))
+
+#define SXE_REG_READ(hw, addr) sxe_read_reg(hw, addr)
+#define SXE_REG_WRITE(hw, reg, value) sxe_write_reg(hw, reg, value)
+/* Posted-write flush: read STATUS to force preceding writes out. */
+#define SXE_WRITE_FLUSH(a) sxe_read_reg(a, SXE_STATUS)
+#define SXE_REG_WRITE_ARRAY(hw, reg, offset, value) \
+	sxe_write_reg(hw, (reg) + ((offset) << 2), (value))
+
+#define SXE_SWAP_32(_value) __swab32((_value))
+
+/* Write a big-endian 32-bit value to a little-endian register. */
+#define SXE_REG_WRITE_BE32(a, reg, value) \
+	SXE_REG_WRITE((a), (reg), SXE_SWAP_32(ntohl(value)))
+
+#define SXE_SWAP_16(_value) __swab16((_value))
+
+#define SXE_REG64_WRITE(a, reg, value) sxe_write_reg64((a), (reg), (value))
+
+/* IPsec hardware table selectors. */
+enum sxe_ipsec_table {
+	SXE_IPSEC_IP_TABLE = 0,
+	SXE_IPSEC_SPI_TABLE,
+	SXE_IPSEC_KEY_TABLE,
+};
+
+/* MAC-block registers exposed through the register-dump interface. */
+u32 mac_regs[] = {
+	SXE_COMCTRL,
+	SXE_PCCTRL,
+	SXE_LPBKCTRL,
+	SXE_MAXFS,
+	SXE_VLANCTRL,
+	SXE_VLANID,
+	SXE_LINKS,
+	SXE_HLREG0,
+	SXE_MFLCN,
+	SXE_MACC,
+};
+
+/* Number of entries in mac_regs[]. */
+u16 sxe_mac_reg_num_get(void)
+{
+	return ARRAY_SIZE(mac_regs);
+}
+
+
+#ifndef SXE_DPDK
+
+/* Latch the hardware-fault state exactly once and notify the registered
+ * fault callback (if both callback and its private context are set).
+ * Subsequent calls while SXE_HW_FAULT is set are no-ops.
+ */
+void sxe_hw_fault_handle(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (test_bit(SXE_HW_FAULT, &hw->state))
+		return;
+
+	set_bit(SXE_HW_FAULT, &hw->state);
+
+	LOG_DEV_ERR("sxe nic hw fault\n");
+
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL))
+		hw->fault_handle(hw->priv);
+}
+
+/* Called after a register read returned SXE_REG_READ_FAIL: probe the
+ * STATUS register up to SXE_REG_READ_RETRY times. If STATUS never reads
+ * back, mark the device faulted; otherwise re-read the original register.
+ * Returns the register value, or SXE_REG_READ_FAIL when faulted.
+ */
+static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw))
+		goto l_out;
+
+	for (i = 0; i < SXE_REG_READ_RETRY; i++) {
+		value = hw->reg_read(base_addr + SXE_STATUS);
+		if (value != SXE_REG_READ_FAIL)
+			break;
+
+		mdelay(3);
+	}
+
+	if (value == SXE_REG_READ_FAIL) {
+		LOG_ERROR_BDF("read registers multiple times failed, ret=%#x\n", value);
+		sxe_hw_fault_handle(hw);
+	} else
+		value = hw->reg_read(base_addr + reg);
+
+	return value;
+l_out:
+	return SXE_REG_READ_FAIL;
+}
+
+/* Kernel-build MMIO read. A value of SXE_REG_READ_FAIL (all ones) may
+ * mean the device fell off the bus, so it is double-checked through
+ * sxe_hw_fault_check() before being returned to the caller.
+ */
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+{
+	u32 value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		value = SXE_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(value == SXE_REG_READ_FAIL)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, ret=%#x\n", reg, value);
+		value = sxe_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+/* Kernel-build MMIO write; silently dropped once the hw is faulted. */
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw))
+		return;
+
+	hw->reg_write(value, base_addr + reg);
+
+}
+
+#else
+
+/* DPDK-build MMIO read. On an all-ones result, STATUS is probed to
+ * distinguish a legitimate value from a dead device; STATUS is retried
+ * up to SXE_REG_READ_RETRY times with a 3 ms delay before giving up and
+ * returning SXE_REG_READ_FAIL.
+ */
+static u32 sxe_read_reg(struct sxe_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+	if (unlikely(value == SXE_REG_READ_FAIL)) {
+
+		value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
+		if (unlikely(value != SXE_REG_READ_FAIL)) {
+
+			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+		} else {
+			LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
+				  reg, SXE_STATUS, value);
+			for (i = 0; i < SXE_REG_READ_RETRY; i++) {
+
+				value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS));
+				if (unlikely(value != SXE_REG_READ_FAIL)) {
+
+					value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+					LOG_INFO("reg[0x%x] read ok, value=%#x\n",
+						 reg, value);
+					break;
+				}
+				LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n",
+					  reg, SXE_STATUS, value);
+
+				mdelay(3);
+			}
+		}
+	}
+
+	return value;
+}
+
+/* DPDK-build MMIO write (little-endian register space). */
+static void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+}
+#endif
+
+/* 64-bit MMIO write; dropped once the hw is faulted. */
+static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value)
+{
+	u8 __iomem *reg_addr = hw->reg_base_addr;
+
+	if (sxe_is_hw_fault(hw))
+		return;
+
+	writeq(value, reg_addr + reg);
+
+}
+
+
+/* Set the no-snoop-disable bit in CTRL_EXT and flush the write. */
+void sxe_hw_no_snoop_disable(struct sxe_hw *hw)
+{
+	u32 ctrl_ext;
+
+	ctrl_ext = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	ctrl_ext |= SXE_CTRL_EXT_NS_DIS;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, ctrl_ext);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Associate receive-address-table entry rar_idx with pool pool_idx by
+ * setting the pool's bit in the MPSAR low/high pair (bits 0-31 in LOW,
+ * 32-63 in HIGH).
+ * Returns 0 on success, -SXE_ERR_PARAM when rar_idx is out of range.
+ */
+s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
+				u8 rar_idx, u8 pool_idx)
+{
+	s32 ret = 0;
+	u32 value;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* Bounds check was off by one ('>'): an index equal to the table
+	 * size is already invalid. Now matches sxe_hw_uc_addr_add().
+	 */
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		ret = -SXE_ERR_PARAM;
+		LOG_DEV_ERR("pool_idx:%d rar_idx:%d invalid.\n",
+			    pool_idx, rar_idx);
+		goto l_end;
+	}
+
+	if (pool_idx < 32) {
+		value = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
+		value |= BIT(pool_idx);
+		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), value);
+	} else {
+		value = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
+		value |= BIT(pool_idx - 32);
+		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), value);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Clear all pool associations of receive-address-table entry rar_idx:
+ * zero whichever halves of the MPSAR pair are non-zero. Skips the writes
+ * when the hw is faulted or both halves are already clear.
+ * Always returns 0.
+ */
+static s32 sxe_hw_uc_addr_pool_disable(struct sxe_hw *hw, u8 rar_idx)
+{
+	u32 hi;
+	u32 low;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	hi = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx));
+	low = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx));
+
+	if (sxe_is_hw_fault(hw))
+		goto l_end;
+
+	/* was '!hi & !low' — bitwise AND of logical results; same value
+	 * here but '&&' expresses the intended logical test.
+	 */
+	if (!hi && !low) {
+		LOG_DEBUG_BDF("no need clear rar-pool relation register.\n");
+		goto l_end;
+	}
+
+	if (low)
+		SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+
+	if (hi)
+		SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+
+
+l_end:
+	return 0;
+}
+
+/* Trigger a device reset via CTRL.RST (keeping GIO enabled) and poll up
+ * to 10 us for the reset bits to self-clear.
+ * Returns 0 on success, -SXE_ERR_RESET_FAILED on timeout.
+ */
+s32 sxe_hw_nic_reset(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u32 ctrl, i;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	ctrl = SXE_CTRL_RST;
+	ctrl |= SXE_REG_READ(hw, SXE_CTRL);
+	ctrl &= ~SXE_CTRL_GIO_DIS;
+	SXE_REG_WRITE(hw, SXE_CTRL, ctrl);
+
+	SXE_WRITE_FLUSH(hw);
+	usleep_range(1000, 1200);
+
+	for (i = 0; i < 10; i++) {
+		ctrl = SXE_REG_READ(hw, SXE_CTRL);
+		if (!(ctrl & SXE_CTRL_RST_MASK))
+			break;
+
+		udelay(1);
+	}
+
+	if (ctrl & SXE_CTRL_RST_MASK) {
+		ret = -SXE_ERR_RESET_FAILED;
+		LOG_DEV_ERR("reset polling failed to complete\n");
+	}
+
+	return ret;
+}
+
+/* Signal "PF reset done" to the hardware via CTRL_EXT.PFRSTD. */
+void sxe_hw_pf_rst_done_set(struct sxe_hw *hw)
+{
+	u32 value;
+
+	value = SXE_REG_READ(hw, SXE_CTRL_EXT);
+	value |= SXE_CTRL_EXT_PFRSTD;
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+
+}
+
+/* Flush posted register writes (reads STATUS). */
+static void sxe_hw_regs_flush(struct sxe_hw *hw)
+{
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Registers printed by sxe_hw_reg_dump(); terminated by a NULL name.
+ * Fields appear to be {addr, count, stride, name} — confirm against the
+ * sxe_reg_info declaration.
+ */
+static const struct sxe_reg_info sxe_reg_info_tbl[] = {
+
+	{SXE_CTRL, 1, 1, "CTRL"},
+	{SXE_STATUS, 1, 1, "STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "CTRL_EXT"},
+
+	{SXE_EICR, 1, 1, "EICR"},
+
+	{SXE_SRRCTL(0), 16, 0x4, "SRRCTL"},
+	{SXE_RDH(0), 64, 0x40, "RDH"},
+	{SXE_RDT(0), 64, 0x40, "RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "RXDCTL"},
+	{SXE_RDBAL(0), 64, 0x40, "RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "RDBAH"},
+
+	{SXE_TDBAL(0), 32, 0x40, "TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "TDH"},
+	{SXE_TDT(0), 32, 0x40, "TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "TXDCTL"},
+
+	{ .name = NULL }
+};
+
+/* Dump one entry of sxe_reg_info_tbl. Per-queue registers are read for
+ * all SXE_DUMP_REGS_NUM instances and printed eight values per row;
+ * anything not in the switch is printed as a single value.
+ * buf is sized SXE_DUMP_REG_STRING_LEN = 8 * 9 chars + NUL.
+ * NOTE(review): rows are emitted via LOG_DEV_ERR even though this is an
+ * informational dump — confirm the level is intended.
+ */
+static void sxe_hw_reg_print(struct sxe_hw *hw,
+			     const struct sxe_reg_info *reginfo)
+{
+	u32 i, j;
+	s8 *value;
+	u32 first_reg_idx = 0;
+	u32 regs[SXE_DUMP_REGS_NUM];
+	s8 reg_name[SXE_REG_NAME_LEN];
+	s8 buf[SXE_DUMP_REG_STRING_LEN];
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (reginfo->addr) {
+	case SXE_SRRCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_SRRCTL(i));
+
+		break;
+	case SXE_RDLEN(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RDLEN(i));
+
+		break;
+	case SXE_RDH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RDH(i));
+
+		break;
+	case SXE_RDT(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RDT(i));
+
+		break;
+	case SXE_RXDCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RXDCTL(i));
+
+		break;
+	case SXE_RDBAL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RDBAL(i));
+
+		break;
+	case SXE_RDBAH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_RDBAH(i));
+
+		break;
+	case SXE_TDBAL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TDBAL(i));
+
+		break;
+	case SXE_TDBAH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TDBAH(i));
+
+		break;
+	case SXE_TDLEN(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TDLEN(i));
+
+		break;
+	case SXE_TDH(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TDH(i));
+
+		break;
+	case SXE_TDT(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TDT(i));
+
+		break;
+	case SXE_TXDCTL(0):
+		for (i = 0; i < SXE_DUMP_REGS_NUM; i++)
+			regs[i] = SXE_REG_READ(hw, SXE_TXDCTL(i));
+
+		break;
+	default:
+		LOG_DEV_INFO("%-15s %08x\n",
+			     reginfo->name, SXE_REG_READ(hw, reginfo->addr));
+		return;
+	}
+
+	while (first_reg_idx < SXE_DUMP_REGS_NUM) {
+		value = buf;
+		snprintf(reg_name, SXE_REG_NAME_LEN,
+			 "%s[%d-%d]", reginfo->name,
+			 first_reg_idx, (first_reg_idx + 7));
+
+		for (j = 0; j < 8; j++)
+			value += sprintf(value, " %08x", regs[first_reg_idx++]);
+
+		LOG_DEV_ERR("%-15s%s\n", reg_name, buf);
+	}
+
+}
+
+/* Print every register group listed in sxe_reg_info_tbl. */
+static void sxe_hw_reg_dump(struct sxe_hw *hw)
+{
+	const struct sxe_reg_info *reginfo;
+
+	for (reginfo = (const struct sxe_reg_info *)sxe_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		sxe_hw_reg_print(hw, reginfo);
+	}
+
+}
+
+/* Self-test of the STATUS register: write a toggle pattern and verify
+ * the writable bits did not change the readable value, then restore the
+ * original contents. Returns 0 or -SXE_DIAG_TEST_BLOCKED.
+ */
+static s32 sxe_hw_status_reg_test(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u32 value, before, after;
+	u32 toggle = 0x7FFFF30F;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	before = SXE_REG_READ(hw, SXE_STATUS);
+	value = (SXE_REG_READ(hw, SXE_STATUS) & toggle);
+	SXE_REG_WRITE(hw, SXE_STATUS, toggle);
+	after = SXE_REG_READ(hw, SXE_STATUS) & toggle;
+	if (value != after) {
+		LOG_MSG_ERR(drv, "failed status register test got: "
+				 "0x%08X expected: 0x%08X\n",
+			    after, value);
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, SXE_STATUS, before);
+
+l_end:
+	return ret;
+}
+
+/* Register self-test methods used by self_test_reg[] below. */
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* One self-test entry: register, instance count, method, writable-bit
+ * mask and the value to write.
+ */
+struct sxe_self_test_reg {
+	u32 reg;
+	u8 array_len;
+	u8 test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* Register self-test plan; terminated by reg == 0. */
+static const struct sxe_self_test_reg self_test_reg[] = {
+	{ SXE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 },
+	{ SXE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+	{ SXE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RDLEN(0), 4, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_RXDCTL(0), 4, WRITE_NO_TEST, 0, SXE_RXDCTL_ENABLE },
+	{ SXE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+	{ SXE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ SXE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+	{ SXE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	{ SXE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ .reg = 0 }
+};
+
+/* Write each test pattern (masked by 'write') to 'reg' and verify the
+ * masked read-back matches; the original register value is restored
+ * after every pattern. Returns 0, -SXE_DIAG_TEST_BLOCKED on hw fault,
+ * or -SXE_DIAG_REG_PATTERN_TEST_ERR on mismatch.
+ */
+static s32 sxe_hw_reg_pattern_test(struct sxe_hw *hw, u32 reg,
+				   u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = SXE_REG_READ(hw, reg);
+
+		SXE_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXE_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+					 "got 0x%08X expected 0x%08X\n",
+				    reg, val, (test_pattern[pat] & write & mask));
+			SXE_REG_WRITE(hw, reg, before);
+			ret = -SXE_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXE_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Write 'write & mask' to 'reg' once and verify the masked read-back,
+ * restoring the original value afterwards. Returns 0,
+ * -SXE_DIAG_TEST_BLOCKED on hw fault, or -SXE_DIAG_CHECK_REG_TEST_ERR.
+ * NOTE(review): 'reg' is declared int here but u32 in the pattern test —
+ * harmless, but inconsistent.
+ */
+static s32 sxe_hw_reg_set_and_check(struct sxe_hw *hw, int reg,
+				    u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (sxe_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXE_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXE_REG_READ(hw, reg);
+	SXE_REG_WRITE(hw, reg, write & mask);
+	val = SXE_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_MSG_ERR(drv, "set/check reg %04X test failed: "
+				 "got 0x%08X expected 0x%08X\n",
+			    reg, (val & mask), (write & mask));
+		SXE_REG_WRITE(hw, reg, before);
+		ret = -SXE_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXE_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
+/* Run the full register self-test: the STATUS toggle test first, then
+ * every entry of self_test_reg[] with the per-entry method and instance
+ * stride (0x40 per queue, 4 bytes per 32-bit table slot, 8 per 64-bit
+ * pair). Stops and returns the first failure.
+ */
+static s32 sxe_hw_regs_test(struct sxe_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxe_self_test_reg *test = self_test_reg;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	ret = sxe_hw_status_reg_test(hw);
+	if (ret) {
+		LOG_MSG_ERR(drv, "status register test failed\n");
+		goto l_end;
+	}
+
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxe_hw_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxe_hw_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxe_hw_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				SXE_REG_WRITE(hw, test->reg + (i * 0x40),
+					      test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+					      test->test_type);
+				break;
+			}
+
+			if (ret)
+				goto l_end;
+
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Basic setup/reset/register-access operations exported to the core. */
+static const struct sxe_setup_operations sxe_setup_ops = {
+	.regs_dump = sxe_hw_reg_dump,
+	.reg_read = sxe_read_reg,
+	.reg_write = sxe_write_reg,
+	.regs_test = sxe_hw_regs_test,
+	.reset = sxe_hw_nic_reset,
+	.regs_flush = sxe_hw_regs_flush,
+	.pf_rst_done_set = sxe_hw_pf_rst_done_set,
+	.no_snoop_disable = sxe_hw_no_snoop_disable,
+};
+
+
+/* Enable ring interrupts for the queues in qmask by writing each 32-bit
+ * half of the extended interrupt-mask-set pair that has any bit set.
+ */
+static void sxe_hw_ring_irq_enable(struct sxe_hw *hw, u64 qmask)
+{
+	u32 low_mask = (u32)(qmask & 0xFFFFFFFF);
+	u32 high_mask = (u32)(qmask >> 32);
+
+	if (low_mask)
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(0), low_mask);
+
+	if (high_mask)
+		SXE_REG_WRITE(hw, SXE_EIMS_EX(1), high_mask);
+
+}
+
+/* Read EICR; reading presumably clears the pending causes — confirm
+ * against the register spec.
+ */
+u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICR);
+}
+
+/* Clear the given cause bits by writing them to EICR. */
+void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EICR, value);
+}
+
+/* Read the interrupt-cause-set register. */
+u32 sxe_hw_irq_cause_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_EICS);
+}
+
+/* Software-trigger the TCP-timer and "other" event interrupts. */
+static void sxe_hw_event_irq_trigger(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EICS, (SXE_EICS_TCP_TIMER | SXE_EICS_OTHER));
+
+}
+
+/* Software-trigger ring interrupts for the queues in eics (64-bit mask
+ * split over the extended EICS pair).
+ */
+static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics)
+{
+	u32 mask;
+
+	mask = (eics & 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(0), mask);
+	mask = (eics >> 32);
+	SXE_REG_WRITE(hw, SXE_EICS_EX(1), mask);
+}
+
+/* Configure interrupt auto-mask: for MSI-X, auto-mask every extended
+ * ring vector; otherwise auto-mask only the RTX queue cause bits.
+ */
+void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
+				  bool is_msix)
+{
+	if (is_msix) {
+		SXE_REG_WRITE(hw, SXE_EIAM_EX(0), 0xFFFFFFFF);
+		SXE_REG_WRITE(hw, SXE_EIAM_EX(1), 0xFFFFFFFF);
+	} else {
+		SXE_REG_WRITE(hw, SXE_EIAM, SXE_EICS_RTX_QUEUE);
+	}
+
+}
+
+/* Write the general-purpose interrupt enable (GPIE) register. */
+void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GPIE, value);
+
+}
+
+/* Read the general-purpose interrupt enable (GPIE) register. */
+u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_GPIE);
+}
+
+/* Write the EITR-select register. */
+static void sxe_hw_set_eitrsel(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITRSEL, value);
+
+}
+
+/* Map a non-queue event cause (offset selects the byte lane within
+ * IVAR_MISC) to MSI-X vector irq_idx; the valid bit marks the entry.
+ */
+void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx)
+{
+	u8 allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = (offset & 1) * 8;
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR_MISC);
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR_MISC, ivar);
+
+}
+
+/* Map ring reg_idx (rx when is_tx == false, tx when true) to MSI-X
+ * vector irq_idx. Each IVAR register packs two queues, one byte per
+ * rx/tx direction.
+ */
+void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
+			 u16 reg_idx, u16 irq_idx)
+{
+	u8 allocation;
+	u32 ivar, position;
+
+	allocation = irq_idx | SXE_IVAR_ALLOC_VALID;
+
+	position = ((reg_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXE_REG_READ(hw, SXE_IVAR(reg_idx >> 1));
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXE_REG_WRITE(hw, SXE_IVAR(reg_idx >> 1), ivar);
+
+}
+
+/* Program the ring interrupt throttle (EITR) for a vector; the
+ * counter-write-disable bit is always set along with the interval.
+ */
+void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
+				  u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXE_EITR_ITR_MASK;
+
+	eitr |= SXE_EITR_CNT_WDIS;
+
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), eitr);
+
+}
+
+/* Write a raw EITR value for an event vector (no masking applied). */
+static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw,
+					  u16 irq_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EITR(irq_idx), value);
+
+}
+
+/* Write the event interrupt auto-clear (EIAC) register. */
+void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIAC, value);
+
+}
+
+/* Mask (disable) the causes in value via EIMC. */
+void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, value);
+
+}
+
+/* Unmask (enable) the causes in value via EIMS. */
+void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EIMS, value);
+
+}
+
+/* Mask every interrupt source (base and extended EIMC) and flush. */
+void sxe_hw_all_irq_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_EIMC, 0xFFFF0000);
+
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(0), ~0);
+	SXE_REG_WRITE(hw, SXE_EIMC_EX(1), ~0);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Update the SPP processing-delay field, preserving the other bits. */
+static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us)
+{
+	SXE_REG_WRITE(hw, SXE_SPP_PROC,
+		      (SXE_REG_READ(hw, SXE_SPP_PROC) &
+		       ~SXE_SPP_PROC_DELAY_US_MASK) |
+		      hw_spp_proc_delay_us);
+
+}
+
+/* Interrupt self-test over the first 10 cause bits. For each bit:
+ * (non-shared only) verify a masked cause does not fire, then verify an
+ * unmasked, software-triggered cause does fire, then (non-shared only)
+ * verify no other cause fires. *icr is the cause accumulator updated by
+ * the ISR between the sleeps; all interrupts are left disabled on exit.
+ * Returns 0 or the first -SXE_DIAG_*_IRQ_TEST_ERR encountered.
+ */
+static s32 sxe_hw_irq_test(struct sxe_hw *hw, u32 *icr, bool shared)
+{
+	s32 ret = 0;
+	u32 i, mask;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	for (i = 0; i < 10; i++) {
+		mask = BIT(i);
+		if (!shared) {
+			LOG_INFO_BDF("test irq: irq test start\n");
+			*icr = 0;
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr & mask) {
+				LOG_ERROR_BDF("test irq: failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: irq test end\n");
+		}
+
+		LOG_INFO_BDF("test irq: mask irq test start\n");
+		*icr = 0;
+		SXE_REG_WRITE(hw, SXE_EIMS, mask);
+		SXE_REG_WRITE(hw, SXE_EICS, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!(*icr & mask)) {
+			LOG_ERROR_BDF("test irq: mask failed, eicr = %x\n", *icr);
+			ret = -SXE_DIAG_ENABLE_IRQ_TEST_ERR;
+			break;
+		}
+		LOG_INFO_BDF("test irq: mask irq test end\n");
+
+		sxe_hw_specific_irq_disable(hw, mask);
+		sxe_hw_regs_flush(hw);
+		usleep_range(10000, 20000);
+
+		if (!shared) {
+			LOG_INFO_BDF("test irq: other irq test start\n");
+			*icr = 0;
+			SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF);
+			SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF);
+			sxe_hw_regs_flush(hw);
+			usleep_range(10000, 20000);
+
+			if (*icr) {
+				LOG_ERROR_BDF("test irq: other irq failed, eicr = %x\n", *icr);
+				ret = -SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR;
+				break;
+			}
+			LOG_INFO_BDF("test irq: other irq test end\n");
+		}
+	}
+
+	sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF);
+	sxe_hw_regs_flush(hw);
+	usleep_range(10000, 20000);
+
+	return ret;
+}
+
+/* Interrupt management operations exported to the core. */
+static const struct sxe_irq_operations sxe_irq_ops = {
+	.event_irq_auto_clear_set = sxe_hw_event_irq_auto_clear_set,
+	.ring_irq_interval_set = sxe_hw_ring_irq_interval_set,
+	.event_irq_interval_set = sxe_hw_event_irq_interval_set,
+	.set_eitrsel = sxe_hw_set_eitrsel,
+	.ring_irq_map = sxe_hw_ring_irq_map,
+	.event_irq_map = sxe_hw_event_irq_map,
+	.irq_general_reg_set = sxe_hw_irq_general_reg_set,
+	.irq_general_reg_get = sxe_hw_irq_general_reg_get,
+	.ring_irq_auto_disable = sxe_hw_ring_irq_auto_disable,
+	.pending_irq_read_clear = sxe_hw_pending_irq_read_clear,
+	.pending_irq_write_clear = sxe_hw_pending_irq_write_clear,
+	.ring_irq_enable = sxe_hw_ring_irq_enable,
+	.irq_cause_get = sxe_hw_irq_cause_get,
+	.event_irq_trigger = sxe_hw_event_irq_trigger,
+	.ring_irq_trigger = sxe_hw_ring_irq_trigger,
+	.specific_irq_disable = sxe_hw_specific_irq_disable,
+	.specific_irq_enable = sxe_hw_specific_irq_enable,
+	.all_irq_disable = sxe_hw_all_irq_disable,
+	.spp_configure = sxe_hw_spp_configure,
+	.irq_test = sxe_hw_irq_test,
+};
+
+
+/* Decode the negotiated link speed from COMCTRL.
+ * Returns SXE_LINK_SPEED_10GB_FULL, SXE_LINK_SPEED_1GB_FULL or
+ * SXE_LINK_SPEED_UNKNOWN.
+ */
+u32 sxe_hw_link_speed_get(struct sxe_hw *hw)
+{
+	u32 speed, value;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	value = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G)
+		speed = SXE_LINK_SPEED_10GB_FULL;
+	else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G)
+		speed = SXE_LINK_SPEED_1GB_FULL;
+	else
+		speed = SXE_LINK_SPEED_UNKNOWN;
+
+	/* the '\n' sat mid-string before ", reg=" — moved to the end so the
+	 * log line is not split in two.
+	 */
+	LOG_DEBUG_BDF("hw link speed=%x, (0x80=10G, 0x20=1G), reg=%x\n",
+		      speed, value);
+
+	return speed;
+}
+
+/* Merge the requested speed bit (1G or 10G full duplex) into COMCTRL;
+ * other speed values leave the register unchanged.
+ */
+void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	if (speed == SXE_LINK_SPEED_1GB_FULL)
+		ctrl |= SXE_COMCTRL_SPEED_1G;
+	else if (speed == SXE_LINK_SPEED_10GB_FULL)
+		ctrl |= SXE_COMCTRL_SPEED_10G;
+
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctrl);
+
+}
+
+/* True when the LINKS register reports link up. */
+static bool sxe_hw_1g_link_up_check(struct sxe_hw *hw)
+{
+	return (SXE_REG_READ(hw, SXE_LINKS) & SXE_LINKS_UP) ? true : false;
+}
+
+/* Report link state: up per LINKS, except that a 10G link with the
+ * 10G-links-down bit set is still treated as down.
+ */
+bool sxe_hw_is_link_state_up(struct sxe_hw *hw)
+{
+	bool ret = false;
+	u32 links_reg, link_speed;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	links_reg = SXE_REG_READ(hw, SXE_LINKS);
+
+	LOG_DEBUG_BDF("nic link reg: 0x%x\n", links_reg);
+
+	if (links_reg & SXE_LINKS_UP) {
+		ret = true;
+
+		link_speed = sxe_hw_link_speed_get(hw);
+		if ((link_speed == SXE_LINK_SPEED_10GB_FULL) &&
+		    (links_reg & SXE_10G_LINKS_DOWN))
+			ret = false;
+
+	}
+
+	return ret;
+}
+
+/* Enable MAC frame padding via MACCFG. */
+void sxe_hw_mac_pad_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_MACCFG);
+	ctl |= SXE_MACCFG_PAD_EN;
+	SXE_REG_WRITE(hw, SXE_MACCFG, ctl);
+
+}
+
+/* Program link-level (802.3x) flow control from hw->fc.current_mode:
+ * rx/tx pause enables in FLCTRL, per-TC XON/XOFF watermarks (FCRTL/
+ * FCRTH; when no high watermark is set the threshold is derived from
+ * the packet-buffer size), the pause opcode/type in PFCTOP, and pause
+ * timer/refresh values. Returns 0 or -SXE_ERR_CONFIG on a bad mode.
+ */
+s32 sxe_hw_fc_enable(struct sxe_hw *hw)
+{
+	s32 ret = 0;
+	u8 i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+			SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		break;
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_LFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_LFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+			fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), fcrth);
+	}
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE))
+		flctrl_val |= (SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_LFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	/* pause_time replicated into both 16-bit halves of each FCTTV. */
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
+/* Advertise the local flow-control capability in the 1G PCS
+ * autonegotiation register (symmetric/asymmetric pause bits) according
+ * to hw->fc.requested_mode; SXE_FC_DEFAULT is promoted to full.
+ */
+void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw)
+{
+	u32 reg = 0;
+
+	if (hw->fc.requested_mode == SXE_FC_DEFAULT)
+		hw->fc.requested_mode = SXE_FC_FULL;
+
+	reg = SXE_REG_READ(hw, SXE_PCS1GANA);
+
+	switch (hw->fc.requested_mode) {
+	case SXE_FC_NONE:
+		reg &= ~(SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE);
+		break;
+	case SXE_FC_TX_PAUSE:
+		reg |= SXE_PCS1GANA_ASM_PAUSE;
+		reg &= ~SXE_PCS1GANA_SYM_PAUSE;
+		break;
+	case SXE_FC_RX_PAUSE:
+	case SXE_FC_FULL:
+		reg |= SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE;
+		break;
+	default:
+		LOG_ERROR("Flow control param set incorrectly.");
+		break;
+	}
+
+	SXE_REG_WRITE(hw, SXE_PCS1GANA, reg);
+}
+
+/* Program priority flow control (802.1Qbb) for traffic class tc_idx,
+ * analogous to sxe_hw_fc_enable() but with per-TC enable/XON bits in
+ * FLCTRL and the PFC opcode in PFCTOP. In NONE/RX_PAUSE modes, tx PFC
+ * is still enabled when more than one TC already has a receive
+ * watermark armed. Returns 0 or -SXE_ERR_CONFIG on a bad mode.
+ */
+s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx)
+{
+	s32 ret = 0;
+	u8 i;
+	u32 reg;
+	u32 flctrl_val;
+	u32 fcrtl, fcrth;
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 rx_en_num;
+
+	flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL);
+	flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK |
+			SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+
+	switch (hw->fc.current_mode) {
+	case SXE_FC_NONE:
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN)
+				rx_en_num++;
+		}
+		if (rx_en_num > 1)
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+
+		break;
+
+	case SXE_FC_RX_PAUSE:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+
+		rx_en_num = 0;
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			reg = SXE_REG_READ(hw, SXE_FCRTH(i));
+			if (reg & SXE_FCRTH_FCEN)
+				rx_en_num++;
+		}
+
+		if (rx_en_num > 1)
+			flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+
+		break;
+	case SXE_FC_TX_PAUSE:
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	case SXE_FC_FULL:
+		flctrl_val |= SXE_FCTRL_RFCE_PFC_EN;
+		flctrl_val |= SXE_FCTRL_TFCE_PFC_EN;
+		break;
+	default:
+		LOG_DEV_DEBUG("flow control param set incorrectly\n");
+		ret = -SXE_ERR_CONFIG;
+		goto l_ret;
+	}
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) &&
+	    hw->fc.high_water[tc_idx]) {
+		fcrtl = (hw->fc.low_water[tc_idx] << 9) | SXE_FCRTL_XONE;
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), fcrtl);
+		fcrth = (hw->fc.high_water[tc_idx] << 9) | SXE_FCRTH_FCEN;
+	} else {
+		SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), 0);
+		fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(tc_idx)) - 24576) >> 1;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FCRTH(tc_idx), fcrth);
+
+	flctrl_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) {
+		flctrl_val |= (BIT(tc_idx) << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+		flctrl_val |= (BIT(tc_idx) << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val);
+
+	reg = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg &= ~SXE_PFCTOP_FCOP_MASK;
+	reg |= SXE_PFCTOP_FCT;
+	reg |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg);
+
+	/* pause_time replicated into both 16-bit halves of each FCTTV. */
+	reg = hw->fc.pause_time * 0x00010001U;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+l_ret:
+	return ret;
+}
+
+/* Enable tx/rx CRC handling and the PCS bits in PCCTRL. */
+void sxe_hw_crc_configure(struct sxe_hw *hw)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_PCCTRL);
+
+	ctrl |= SXE_PCCTRL_TXCE | SXE_PCCTRL_RXCE | SXE_PCCTRL_PCSC_ALL;
+	SXE_REG_WRITE(hw, SXE_PCCTRL, ctrl);
+
+}
+
+/* Enable or disable MAC loopback by writing LPBKCTRL. */
+void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 value = is_enable ? SXE_LPBK_EN : 0;
+
+	SXE_REG_WRITE(hw, SXE_LPBKCTRL, value);
+
+}
+
+/* Enable MAC tx/rx paths (plus the EDSEL bit) in COMCTRL. */
+void sxe_hw_mac_txrx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_COMCTRL);
+	ctl |= SXE_COMCTRL_TXEN | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
+	SXE_REG_WRITE(hw, SXE_COMCTRL, ctl);
+
+}
+
+/* Program the maximum frame size field of MAXFS (only when it differs
+ * from the current value) and always assert the rx/tx frame-size
+ * select bits.
+ */
+void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	if (max_frame != (maxfs >> SXE_MAXFS_MFS_SHIFT)) {
+		maxfs &= ~SXE_MAXFS_MFS_MASK;
+		maxfs |= max_frame << SXE_MAXFS_MFS_SHIFT;
+	}
+
+	maxfs |= SXE_MAXFS_RFSEL | SXE_MAXFS_TFSEL;
+	SXE_REG_WRITE(hw, SXE_MAXFS, maxfs);
+
+}
+
+/* Read back the maximum frame size field from MAXFS. */
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw)
+{
+	u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS);
+
+	maxfs &= SXE_MAXFS_MFS_MASK;
+	maxfs >>= SXE_MAXFS_MFS_SHIFT;
+
+	return maxfs;
+}
+
+/* Autoneg flow control is assumed supported while the link is down;
+ * with an active link it is only available at 1G full duplex.
+ */
+bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw)
+{
+	bool supported = true;
+	bool link_up = sxe_hw_is_link_state_up(hw);
+	u32 link_speed = sxe_hw_link_speed_get(hw);
+
+	if (link_up)
+		supported = (link_speed == SXE_LINK_SPEED_1GB_FULL);
+
+	return supported;
+}
+
+/* Default flow-control state: full rx/tx pause, default pause time,
+ * autonegotiation disabled.
+ */
+static void sxe_hw_fc_param_init(struct sxe_hw *hw)
+{
+	hw->fc.requested_mode = SXE_FC_FULL;
+	hw->fc.current_mode = SXE_FC_FULL;
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+
+	hw->fc.disable_fc_autoneg = true;
+}
+
+/* Cache a per-TC XOFF (high) watermark; applied by fc/pfc enable. */
+void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
+				      u8 tc_idx, u32 mark)
+{
+	hw->fc.high_water[tc_idx] = mark;
+
+}
+
+/* Cache a per-TC XON (low) watermark; applied by fc/pfc enable. */
+void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
+				     u8 tc_idx, u32 mark)
+{
+	hw->fc.low_water[tc_idx] = mark;
+
+}
+
+/* Whether flow-control autonegotiation is disabled. */
+bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw)
+{
+	return hw->fc.disable_fc_autoneg;
+}
+
+/* Enable/disable flow-control autonegotiation (software state only). */
+void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
+				   bool is_disabled)
+{
+	hw->fc.disable_fc_autoneg = is_disabled;
+}
+
+/* Currently applied flow-control mode. */
+static enum sxe_fc_mode sxe_hw_fc_current_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.current_mode;
+}
+
+/* Requested (not yet necessarily applied) flow-control mode. */
+static enum sxe_fc_mode sxe_hw_fc_requested_mode_get(struct sxe_hw *hw)
+{
+	return hw->fc.requested_mode;
+}
+
+/* Set the requested flow-control mode (software state only). */
+void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
+				  enum sxe_fc_mode mode)
+{
+	hw->fc.requested_mode = mode;
+}
+
+/* MAC-layer operations exported to the core. */
+static const struct sxe_mac_operations sxe_mac_ops = {
+	.link_up_1g_check = sxe_hw_1g_link_up_check,
+	.link_state_is_up = sxe_hw_is_link_state_up,
+	.link_speed_get = sxe_hw_link_speed_get,
+	.link_speed_set = sxe_hw_link_speed_set,
+	.pad_enable = sxe_hw_mac_pad_enable,
+	.crc_configure = sxe_hw_crc_configure,
+	.loopback_switch = sxe_hw_loopback_switch,
+	.txrx_enable = sxe_hw_mac_txrx_enable,
+	.max_frame_set = sxe_hw_mac_max_frame_set,
+	.max_frame_get = sxe_hw_mac_max_frame_get,
+	.fc_enable = sxe_hw_fc_enable,
+	.fc_autoneg_localcap_set = sxe_fc_autoneg_localcap_set,
+	.fc_tc_high_water_mark_set = sxe_hw_fc_tc_high_water_mark_set,
+	.fc_tc_low_water_mark_set = sxe_hw_fc_tc_low_water_mark_set,
+	.fc_param_init = sxe_hw_fc_param_init,
+	.fc_current_mode_get = sxe_hw_fc_current_mode_get,
+	.fc_requested_mode_get = sxe_hw_fc_requested_mode_get,
+	.fc_requested_mode_set = sxe_hw_fc_requested_mode_set,
+	.is_fc_autoneg_disabled = sxe_hw_is_fc_autoneg_disabled,
+	.fc_autoneg_disable_set = sxe_hw_fc_autoneg_disable_set,
+};
+
+/* Read the global receive filter control register (FCTRL). */
+u32 sxe_hw_rx_mode_get(struct sxe_hw *hw)
+{
+	u32 fctrl = SXE_REG_READ(hw, SXE_FCTRL);
+
+	return fctrl;
+}
+
+/* Read the per-pool receive offload/filter register (VMOLR). */
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx)
+{
+	u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(pool_idx));
+
+	return vmolr;
+}
+
+/* Program the global receive filter control register (FCTRL). */
+void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl)
+{
+	SXE_REG_WRITE(hw, SXE_FCTRL, filter_ctrl);
+}
+
+/* Program the per-pool receive offload/filter register (VMOLR). */
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+				u32 vmolr, u16 pool_idx)
+{
+	SXE_REG_WRITE(hw, SXE_VMOLR(pool_idx), vmolr);
+}
+
+/* Toggle receive LRO: the RFCTL bit is a *disable* flag, so it is
+ * cleared to enable LRO and set to turn LRO off.
+ */
+void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable)
+{
+	u32 rfctl = SXE_REG_READ(hw, SXE_RFCTL);
+
+	if (is_enable)
+		rfctl &= ~SXE_RFCTL_LRO_DIS;
+	else
+		rfctl |= SXE_RFCTL_LRO_DIS;
+
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+}
+
+/* Disable NFS write/read packet filtering.
+ *
+ * NOTE(review): RFCTL is written with only the two NFS-disable bits,
+ * not read-modify-written, so any other RFCTL bits (e.g. LRO disable)
+ * are cleared by this call — confirm this full overwrite is intended.
+ */
+void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw)
+{
+	u32 rfctl = 0;
+
+	rfctl |= (SXE_RFCTL_NFSW_DIS | SXE_RFCTL_NFSR_DIS);
+	SXE_REG_WRITE(hw, SXE_RFCTL, rfctl);
+}
+
+/* Set the RXCSUM PCSD bit, which disables checksum handling for
+ * fragmented UDP packets (RSS hash is reported instead).
+ */
+void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw)
+{
+	u32 rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum | SXE_RXCSUM_PCSD);
+}
+
+/* Program the MAC address used as the source of flow-control (pause)
+ * frames. Bytes are packed big-endian style: addr[0] is the most
+ * significant byte of the high register.
+ */
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr)
+{
+	u32 high;
+	u32 low;
+
+	low  = (u32)mac_addr[5];
+	low |= (u32)mac_addr[4] << 8;
+	low |= (u32)mac_addr[3] << 16;
+	low |= (u32)mac_addr[2] << 24;
+
+	high  = (u32)mac_addr[1] << 16;
+	high |= (u32)mac_addr[0] << 24;
+
+	SXE_REG_WRITE(hw, SXE_SACONH, high);
+	SXE_REG_WRITE(hw, SXE_SACONL, low);
+}
+
+/**
+ * Add a unicast MAC address to receive-address register pair rar_idx and
+ * bind it to the given pool.
+ *
+ * @hw:       hardware handle
+ * @rar_idx:  RAR entry to program, must be < SXE_UC_ENTRY_NUM_MAX
+ * @addr:     6-byte MAC address, addr[0] is the first wire byte
+ * @pool_idx: pool to associate with this entry
+ *
+ * Return: 0 on success, -SXE_ERR_PARAM when rar_idx is out of range.
+ */
+s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
+				u8 *addr, u32 pool_idx)
+{
+	s32 ret = 0;
+	u32 rar_low, rar_high;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		/* %u for the unsigned index (was %d: signed/unsigned mismatch) */
+		LOG_DEV_DEBUG("RAR rar_idx %u is out of range:%u.\n",
+			rar_idx, SXE_UC_ENTRY_NUM_MAX);
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+	sxe_hw_uc_addr_pool_enable(hw, rar_idx, pool_idx);
+
+	/* addr[0..3] form RAL, addr[4..5] the low half of RAH. */
+	rar_low = ((u32)addr[0] |
+		  ((u32)addr[1] << 8) |
+		  ((u32)addr[2] << 16) |
+		  ((u32)addr[3] << 24));
+
+	rar_high = SXE_REG_READ(hw, SXE_RAH(rar_idx));
+	rar_high &= ~(0x0000FFFF | SXE_RAH_AV);
+	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	/* Mark the entry valid only after the address bytes are in place. */
+	rar_high |= SXE_RAH_AV;
+
+	SXE_REG_WRITE(hw, SXE_RAL(rar_idx), rar_low);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAH(rar_idx), rar_high);
+
+	LOG_DEBUG_BDF("rar_idx:%d pool_idx:%u addr:%pM add to rar done\n",
+		  rar_idx, pool_idx, addr);
+
+l_end:
+	return ret;
+}
+
+/* Remove the unicast address at RAR entry @index: clear the valid bit
+ * first, then zero RAL and detach the entry from all pools.
+ * Returns 0 on success, -SXE_ERR_PARAM for an out-of-range index.
+ */
+s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index)
+{
+	u32 rah;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (index >= SXE_UC_ENTRY_NUM_MAX) {
+		s32 err = -SXE_ERR_PARAM;
+
+		LOG_ERROR_BDF("uc_entry_num:%d index:%u invalid.(err:%d)\n",
+			SXE_UC_ENTRY_NUM_MAX, index, err);
+		return err;
+	}
+
+	rah = SXE_REG_READ(hw, SXE_RAH(index));
+	rah &= ~(0x0000FFFF | SXE_RAH_AV);
+
+	SXE_REG_WRITE(hw, SXE_RAH(index), rah);
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_RAL(index), 0);
+
+	sxe_hw_uc_addr_pool_disable(hw, index);
+
+	return 0;
+}
+
+/* Write one 32-bit word of the multicast hash table (MTA). */
+void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
+						u8 index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_MTA(index), value);
+}
+
+/* Set a single bit in the multicast hash table (read-modify-write). */
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+						u8 reg_idx, u8 bit_idx)
+{
+	u32 mta = SXE_REG_READ(hw, SXE_MTA(reg_idx)) | BIT(bit_idx);
+
+	LOG_INFO("mta update value:0x%x.\n", mta);
+	SXE_REG_WRITE(hw, SXE_MTA(reg_idx), mta);
+}
+
+/* Enable multicast filtering with hash filter type 0. */
+void sxe_hw_mc_filter_enable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL,
+		SXE_MC_FILTER_TYPE0 | SXE_MCSTCTRL_MFE);
+}
+
+/* Clear the multicast-filter-enable bit, leaving the filter type intact. */
+static void sxe_hw_mc_filter_disable(struct sxe_hw *hw)
+{
+	u32 mcstctrl = SXE_REG_READ(hw, SXE_MCSTCTRL);
+
+	mcstctrl &= ~SXE_MCSTCTRL_MFE;
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, mcstctrl);
+}
+
+/* Wipe all L2 address filtering state: pool bindings, every RAR pair,
+ * the unicast hash table (UTA) and the multicast hash table (MTA);
+ * MCSTCTRL is reset to filter type 0 with the enable bit cleared.
+ */
+void sxe_hw_uc_addr_clear(struct sxe_hw *hw)
+{
+	u32 idx;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_uc_addr_pool_disable(hw, 0);
+
+	LOG_DEV_DEBUG("clear uc filter addr register:0-%d\n",
+				SXE_UC_ENTRY_NUM_MAX - 1);
+	for (idx = 0; idx < SXE_UC_ENTRY_NUM_MAX; idx++) {
+		SXE_REG_WRITE(hw, SXE_RAL(idx), 0);
+		SXE_REG_WRITE(hw, SXE_RAH(idx), 0);
+	}
+
+	LOG_DEV_DEBUG("clear %u uta filter addr register\n",
+			SXE_UTA_ENTRY_NUM_MAX);
+	for (idx = 0; idx < SXE_UTA_ENTRY_NUM_MAX; idx++)
+		SXE_REG_WRITE(hw, SXE_UTA(idx), 0);
+
+	SXE_REG_WRITE(hw, SXE_MCSTCTRL, SXE_MC_FILTER_TYPE0);
+
+	LOG_DEV_DEBUG("clear %u mta filter addr register\n",
+				SXE_MTA_ENTRY_NUM_MAX);
+	for (idx = 0; idx < SXE_MTA_ENTRY_NUM_MAX; idx++)
+		SXE_REG_WRITE(hw, SXE_MTA(idx), 0);
+}
+
+/* Program one EtherType queue filter (ETQF) slot. */
+static void sxe_hw_ethertype_filter_set(struct sxe_hw *hw,
+						u8 filter_type, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), value);
+}
+
+/* Enable virtualization (VT) with @default_pool as the default pool and
+ * replication enabled.
+ */
+void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool)
+{
+	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	vt_ctl |= SXE_VT_CTL_VT_ENABLE;
+	vt_ctl &= ~SXE_VT_CTL_POOL_MASK;
+	vt_ctl |= default_pool << SXE_VT_CTL_POOL_SHIFT;
+	vt_ctl |= SXE_VT_CTL_REPLEN;
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
+}
+
+/* Turn off VMDq pooling by clearing the pool-enable bit in VT_CTL. */
+void sxe_hw_vt_disable(struct sxe_hw *hw)
+{
+	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	vt_ctl &= ~SXE_VMD_CTL_POOL_EN;
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
+}
+
+#ifdef SXE_WOL_CONFIGURE
+
+/* Clear all pending Wake-on-LAN status bits (write-1-to-clear WUS). */
+static void sxe_hw_wol_status_set(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUS, ~0);
+
+}
+
+/* Arm Wake-on-LAN: enable PME, keep broadcast acceptance on, optionally
+ * enable multicast promiscuous when multicast wake is requested, program
+ * the wake filter mask and clear any stale wake status.
+ */
+static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status)
+{
+	u32 fctrl;
+
+	SXE_REG_WRITE(hw, SXE_WUC, SXE_WUC_PME_EN);
+
+	fctrl = SXE_REG_READ(hw, SXE_FCTRL);
+	fctrl |= SXE_FCTRL_BAM;
+	if (wol_status & SXE_WUFC_MC)
+		fctrl |= SXE_FCTRL_MPE;
+
+	SXE_REG_WRITE(hw, SXE_FCTRL, fctrl);
+
+	SXE_REG_WRITE(hw, SXE_WUFC, wol_status);
+	sxe_hw_wol_status_set(hw);
+
+}
+
+/* Disarm Wake-on-LAN entirely: control and filter registers to zero. */
+static void sxe_hw_wol_mode_clean(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_WUC, 0);
+	SXE_REG_WRITE(hw, SXE_WUFC, 0);
+
+}
+#endif
+
+/* MAC filtering operation dispatch table: rx-mode, unicast/multicast
+ * address filters, EtherType filters, virtualization control and
+ * (optionally) Wake-on-LAN handling.
+ */
+static const struct sxe_filter_mac_operations sxe_filter_mac_ops = {
+	.rx_mode_get = sxe_hw_rx_mode_get,
+	.rx_mode_set = sxe_hw_rx_mode_set,
+	.pool_rx_mode_get = sxe_hw_pool_rx_mode_get,
+	.pool_rx_mode_set = sxe_hw_pool_rx_mode_set,
+	.rx_lro_enable  = sxe_hw_rx_lro_enable,
+	.uc_addr_add = sxe_hw_uc_addr_add,
+	.uc_addr_del = sxe_hw_uc_addr_del,
+	.uc_addr_clear = sxe_hw_uc_addr_clear,
+	.fc_mac_addr_set = sxe_hw_fc_mac_addr_set,
+	.mta_hash_table_set = sxe_hw_mta_hash_table_set,
+	.mta_hash_table_update = sxe_hw_mta_hash_table_update,
+
+	.mc_filter_enable = sxe_hw_mc_filter_enable,
+	.mc_filter_disable = sxe_hw_mc_filter_disable,
+	.rx_nfs_filter_disable = sxe_hw_rx_nfs_filter_disable,
+	.ethertype_filter_set = sxe_hw_ethertype_filter_set,
+	.vt_ctrl_configure = sxe_hw_vt_ctrl_cfg,
+	.uc_addr_pool_enable = sxe_hw_uc_addr_pool_enable,
+	.rx_udp_frag_checksum_disable = sxe_hw_rx_udp_frag_checksum_disable,
+
+#ifdef SXE_WOL_CONFIGURE
+	.wol_mode_set = sxe_hw_wol_mode_set,
+	.wol_mode_clean = sxe_hw_wol_mode_clean,
+	.wol_status_set = sxe_hw_wol_status_set,
+#endif
+
+	.vt_disable = sxe_hw_vt_disable,
+};
+
+/* Read one VLAN pool filter (VLVF) entry. */
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index)
+{
+	u32 vlvf = SXE_REG_READ(hw, SXE_VLVF(reg_index));
+
+	return vlvf;
+}
+
+/* Write one VLAN pool filter (VLVF) entry. */
+static void sxe_hw_vlan_pool_filter_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(reg_index), value);
+}
+
+/* Read one VLAN pool membership bitmap (VLVFB) word. */
+static u32 sxe_hw_vlan_pool_filter_bitmap_read(struct sxe_hw *hw,
+						u16 reg_index)
+{
+	u32 vlvfb = SXE_REG_READ(hw, SXE_VLVFB(reg_index));
+
+	return vlvfb;
+}
+
+/* Write one VLAN pool membership bitmap (VLVFB) word. */
+static void sxe_hw_vlan_pool_filter_bitmap_write(struct sxe_hw *hw,
+						u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLVFB(reg_index), value);
+}
+
+/* Write one 32-bit word of the VLAN filter table array (VFTA). */
+void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
+					u16 reg_index, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VFTA(reg_index), value);
+}
+
+/* Read one 32-bit word of the VLAN filter table array (VFTA). */
+u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index)
+{
+	u32 vfta = SXE_REG_READ(hw, SXE_VFTA(reg_index));
+
+	return vfta;
+}
+
+/* Globally enable or disable VLAN filtering (VLNCTRL.VFE). */
+void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+
+	vlnctrl &= ~SXE_VLNCTRL_VFE;
+	if (is_enable)
+		vlnctrl |= SXE_VLNCTRL_VFE;
+
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlnctrl);
+}
+
+/* Control whether pool @vf accepts untagged packets (VMOLR.AUPE);
+ * broadcast acceptance (BAM) is always kept on.
+ */
+static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw,
+								u32 vf, bool accept)
+{
+	u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(vf)) | SXE_VMOLR_BAM;
+
+	if (accept)
+		vmolr |= SXE_VMOLR_AUPE;
+	else
+		vmolr &= ~SXE_VMOLR_AUPE;
+
+	LOG_WARN("vf:%u value:0x%x.\n", vf, vmolr);
+	SXE_REG_WRITE(hw, SXE_VMOLR(vf), vmolr);
+}
+
+/* Find the VLVF slot holding @vlan, or an empty slot to place it in.
+ *
+ * Scans from the highest entry down to 1 (entry 0 is never used).
+ * Returns the matching slot index, 0 for vlan 0, the first empty slot
+ * when no match exists, or -SXE_ERR_NO_SPACE when the table is full
+ * (immediately when @vlvf_bypass is set).
+ */
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+	s32 ret, regindex, first_empty_slot;
+	u32 bits;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* vlan 0 always maps to slot 0. */
+	if (vlan == 0) {
+		ret = 0;
+		goto l_end;
+	}
+
+	first_empty_slot = vlvf_bypass ? -SXE_ERR_NO_SPACE : 0;
+
+	/* Compare against enabled entries only (VIEN set). */
+	vlan |= SXE_VLVF_VIEN;
+
+	for (regindex = SXE_VLVF_ENTRIES; --regindex;) {
+		bits = SXE_REG_READ(hw, SXE_VLVF(regindex));
+		if (bits == vlan) {
+			ret = regindex;
+			goto l_end;
+		}
+
+		/* Remember the lowest empty slot seen so far. */
+		if (!first_empty_slot && !bits)
+			first_empty_slot = regindex;
+	}
+
+	if (!first_empty_slot)
+		LOG_DEV_WARN("no space in VLVF.\n");
+
+	/* GNU "?:" — yields first_empty_slot when non-zero. */
+	ret = first_empty_slot ? : -SXE_ERR_NO_SPACE;
+l_end:
+	return ret;
+}
+
+/* Add or remove VLAN @vid for @pool in both the VFTA bitmap and, when
+ * virtualization is enabled, the VLVF/VLVFB pool filter tables.
+ *
+ * vfta_delta holds the single VFTA bit that must flip; it is zeroed when
+ * other pools still use the VLAN so the shared VFTA bit must stay set.
+ * Returns 0 on success, -SXE_ERR_PARAM for invalid vid/pool, or the
+ * VLVF lookup error when no slot is available and bypass is off.
+ */
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+					u32 vid, u32 pool,
+					bool vlan_on, bool vlvf_bypass)
+{
+	s32 ret = 0;
+	u32 regidx, vfta_delta, vfta, bits;
+	s32 vlvf_index;
+
+	LOG_DEBUG("vid: %u, pool: %u, vlan_on: %d, vlvf_bypass: %d",
+		vid, pool, vlan_on, vlvf_bypass);
+
+	if ((vid > 4095) || (pool > 63)) {
+		ret = -SXE_ERR_PARAM;
+		goto l_end;
+	}
+
+
+	/* One VFTA word covers 32 VLAN ids. */
+	regidx = vid / 32;
+	vfta_delta = BIT(vid % 32);
+	vfta = SXE_REG_READ(hw, SXE_VFTA(regidx));
+
+	/* delta is non-zero only when the bit actually needs to change. */
+	vfta_delta &= vlan_on ? ~vfta : vfta;
+	vfta ^= vfta_delta;
+
+	/* Without VT the VLVF tables are not in use. */
+	if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE))
+		goto vfta_update;
+
+	vlvf_index = sxe_hw_vlvf_slot_find(hw, vid, vlvf_bypass);
+	if (vlvf_index < 0) {
+		if (vlvf_bypass)
+			goto vfta_update;
+
+		ret = vlvf_index;
+		goto l_end;
+	}
+
+	/* Two VLVFB words per slot; pool/32 selects the word. */
+	bits = SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32));
+
+	bits |= BIT(pool % 32);
+	if (vlan_on)
+		goto vlvf_update;
+
+	/* Removing: undo the set above to drop this pool's bit. */
+	bits ^= BIT(pool % 32);
+
+	/* Last pool gone from both bitmap words: release the VLVF slot. */
+	if (!bits &&
+		!SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) {
+		if (vfta_delta)
+			SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+
+		SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), 0);
+
+		goto l_end;
+	}
+
+	/* Other pools still reference the VLAN: keep the VFTA bit set. */
+	vfta_delta = 0;
+
+vlvf_update:
+	SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), bits);
+	SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), SXE_VLVF_VIEN | vid);
+
+vfta_update:
+	if (vfta_delta)
+		SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta);
+
+l_end:
+	return ret;
+}
+
+/* Zero the full VLAN filter table (VFTA) and every VLVF slot together
+ * with its two pool-bitmap words.
+ */
+void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw)
+{
+	u32 idx;
+
+	for (idx = 0; idx < SXE_VFT_TBL_SIZE; idx++)
+		SXE_REG_WRITE(hw, SXE_VFTA(idx), 0);
+
+	for (idx = 0; idx < SXE_VLVF_ENTRIES; idx++) {
+		SXE_REG_WRITE(hw, SXE_VLVF(idx), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(idx * 2), 0);
+		SXE_REG_WRITE(hw, SXE_VLVFB(idx * 2 + 1), 0);
+	}
+}
+
+/* VLAN filtering operation dispatch table (VFTA/VLVF/VLVFB access and
+ * global VLAN filter control).
+ */
+static const struct sxe_filter_vlan_operations sxe_filter_vlan_ops = {
+	.pool_filter_read = sxe_hw_vlan_pool_filter_read,
+	.pool_filter_write = sxe_hw_vlan_pool_filter_write,
+	.pool_filter_bitmap_read = sxe_hw_vlan_pool_filter_bitmap_read,
+	.pool_filter_bitmap_write = sxe_hw_vlan_pool_filter_bitmap_write,
+	.filter_array_write = sxe_hw_vlan_filter_array_write,
+	.filter_array_read = sxe_hw_vlan_filter_array_read,
+	.filter_array_clear = sxe_hw_vlan_filter_array_clear,
+	.filter_switch = sxe_hw_vlan_filter_switch,
+	.untagged_pkts_rcv_switch = sxe_hw_vlan_untagged_pkts_rcv_switch,
+	.filter_configure = sxe_hw_vlan_filter_configure,
+};
+
+
+/* Start or stop the receive packet buffer unit (DRXCFG). */
+static void sxe_hw_rx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 drxcfg = SXE_REG_READ(hw, SXE_DRXCFG);
+
+	if (is_on)
+		drxcfg |= SXE_DRXCFG_DBURX_START;
+	else
+		drxcfg &= ~SXE_DRXCFG_DBURX_START;
+
+	SXE_REG_WRITE(hw, SXE_DRXCFG, drxcfg);
+}
+
+/* Partition the receive packet buffer across @num_pb buffers.
+ *
+ * PBA_STRATEGY_WEIGHTED gives the first half of the buffers a larger
+ * share (each 5/4 of an equal share) and then falls through to split
+ * the remainder equally; PBA_STRATEGY_EQUAL splits everything equally.
+ * Unused RXPBSIZE entries are zeroed. @headroom is reserved off the top.
+ */
+static void sxe_hw_rx_pkt_buf_size_configure(struct sxe_hw *hw,
+					u8 num_pb,
+					u32 headroom,
+					u16 strategy)
+{
+	u16 total_buf_size = (SXE_RX_PKT_BUF_SIZE - headroom);
+	u32 rx_buf_size;
+	u16 i = 0;
+
+	if (!num_pb)
+		num_pb = 1;
+
+	switch (strategy) {
+	case (PBA_STRATEGY_WEIGHTED):
+		/* 5/4 of an equal share for each of the first num_pb/2. */
+		rx_buf_size = ((total_buf_size * 5 * 2) / (num_pb * 8));
+		total_buf_size -= rx_buf_size * (num_pb / 2);
+		rx_buf_size <<= SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (i = 0; i < (num_pb / 2); i++)
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+
+		fallthrough;
+	case (PBA_STRATEGY_EQUAL):
+		/* Split what is left evenly over the remaining buffers. */
+		rx_buf_size = (total_buf_size / (num_pb - i))
+				<< SXE_RX_PKT_BUF_SIZE_SHIFT;
+		for (; i < num_pb; i++)
+			SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size);
+
+		break;
+
+	default:
+		break;
+	}
+
+	/* Zero out buffers beyond num_pb. */
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++)
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+
+}
+
+/* Read the configured size of receive packet buffer @pb. */
+u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb)
+{
+	u32 size = SXE_REG_READ(hw, SXE_RXPBSIZE(pb));
+
+	return size;
+}
+
+/**
+ * Select the multi-queue receive mode in MRQC based on the number of
+ * traffic classes and whether SR-IOV is active.
+ *
+ * @hw:             hardware handle
+ * @tcs:            number of traffic classes in use
+ * @is_4q_per_pool: with SR-IOV and no DCB, choose 32 pools x 4 queues
+ *                  (true) vs 64 pools x 2 queues (false)
+ * @sriov_enable:   SR-IOV active
+ */
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+					u8 tcs, bool is_4q_per_pool,
+					bool sriov_enable)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_MRQE_MASK;
+
+	if (sriov_enable) {
+		if (tcs > 4)
+			mrqc |= SXE_MRQC_VMDQRT8TCEN;
+		else if (tcs > 1)
+			mrqc |= SXE_MRQC_VMDQRT4TCEN;
+		else if (is_4q_per_pool)	/* was "== true": test the bool directly */
+			mrqc |= SXE_MRQC_VMDQRSS32EN;
+		else
+			mrqc |= SXE_MRQC_VMDQRSS64EN;
+
+	} else {
+		if (tcs > 4)
+			mrqc |= SXE_MRQC_RTRSS8TCEN;
+		else if (tcs > 1)
+			mrqc |= SXE_MRQC_RTRSS4TCEN;
+		else
+			mrqc |= SXE_MRQC_RSSEN;
+
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Program MRQC RSS field selection from scratch: TCP/IP v4+v6 always,
+ * plus UDP for the requested IP version.
+ */
+static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version)
+{
+	u32 fields = SXE_MRQC_RSS_FIELD_IPV4 |
+		     SXE_MRQC_RSS_FIELD_IPV4_TCP |
+		     SXE_MRQC_RSS_FIELD_IPV6 |
+		     SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	if (version == SXE_RSS_IP_VER_4)
+		fields |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+
+	if (version == SXE_RSS_IP_VER_6)
+		fields |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, fields);
+}
+
+/* Update MRQC RSS field selection in place: keep existing bits, force
+ * the TCP/IP fields on and re-derive the UDP fields from @version.
+ */
+static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw,
+						u32 version)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc |= SXE_MRQC_RSS_FIELD_IPV4
+		| SXE_MRQC_RSS_FIELD_IPV4_TCP
+		| SXE_MRQC_RSS_FIELD_IPV6
+		| SXE_MRQC_RSS_FIELD_IPV6_TCP;
+
+	mrqc &= ~(SXE_MRQC_RSS_FIELD_IPV4_UDP |
+		  SXE_MRQC_RSS_FIELD_IPV6_UDP);
+
+	if (version == SXE_RSS_IP_VER_4)
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV4_UDP;
+	else if (version == SXE_RSS_IP_VER_6)
+		mrqc |= SXE_MRQC_RSS_FIELD_IPV6_UDP;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Program PSRTYPE for @pool pools starting at @pf_offset.
+ *
+ * NOTE(review): bits 30:29 appear to encode log2 of the RSS queue count
+ * (2 -> 4+ queues, 1 -> 2 queues, 0 -> 1 queue) — confirm against the
+ * PSRTYPE register definition in the datasheet.
+ */
+static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num,
+						u16 pool, u16 pf_offset)
+{
+	u32 psrtype = 0;
+
+	if (rss_num > 3)
+		psrtype |= 2u << 29;
+	else if (rss_num > 1)
+		psrtype |= 1u << 29;
+
+	while (pool--)
+		SXE_REG_WRITE(hw, SXE_PSRTYPE(pf_offset + pool), psrtype);
+
+}
+
+/* Load the full RSS hash key into the RSSRK register array. */
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key)
+{
+	u32 idx;
+
+	for (idx = 0; idx < SXE_MAX_RSS_KEY_ENTRIES; idx++)
+		SXE_REG_WRITE(hw, SXE_RSSRK(idx), rss_key[idx]);
+}
+
+/* Write one RETA register; @reg_idx is an entry index, four entries
+ * pack into each 32-bit register (hence the >> 2).
+ */
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+						u16 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RETA(reg_idx >> 2), value);
+}
+
+/* Load the entire RSS redirection table; four 8-bit entries are packed
+ * little-endian into each 32-bit RETA register.
+ */
+void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl)
+{
+	u32 entry;
+	u32 packed = 0;
+	u32 indices_multi = 0x1;
+
+
+	for (entry = 0; entry < SXE_MAX_RETA_ENTRIES; entry++) {
+		packed |= indices_multi * redir_tbl[entry] << (entry & 0x3) * 8;
+		if ((entry & 3) == 3) {
+			/* Register word complete: flush and restart. */
+			sxe_hw_rss_redir_tbl_reg_write(hw, entry, packed);
+			packed = 0;
+		}
+	}
+}
+
+/* Enable the DMA receive path; restore VT loopback first if it was
+ * disabled by a previous rx switch-off.
+ */
+void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw)
+{
+	u32 rxctrl;
+
+	if (hw->mac.set_lben) {
+		u32 gswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+
+		gswc |= SXE_PFDTXGSWC_VT_LBEN;
+		SXE_REG_WRITE(hw, SXE_PFDTXGSWC, gswc);
+		hw->mac.set_lben = false;
+	}
+
+	rxctrl = SXE_REG_READ(hw, SXE_RXCTRL) | SXE_RXCTRL_RXEN;
+	SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
+}
+
+/* Disable the DMA receive path. If VT loopback is on, turn it off as
+ * well and remember that fact in hw->mac.set_lben so switch-on can
+ * restore it.
+ */
+void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw)
+{
+	u32 rxctrl = SXE_REG_READ(hw, SXE_RXCTRL);
+
+	if (rxctrl & SXE_RXCTRL_RXEN) {
+		u32 gswc = SXE_REG_READ(hw, SXE_PFDTXGSWC);
+
+		if (gswc & SXE_PFDTXGSWC_VT_LBEN) {
+			gswc &= ~SXE_PFDTXGSWC_VT_LBEN;
+			SXE_REG_WRITE(hw, SXE_PFDTXGSWC, gswc);
+			hw->mac.set_lben = true;
+		} else {
+			hw->mac.set_lben = false;
+		}
+		rxctrl &= ~SXE_RXCTRL_RXEN;
+		SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl);
+	}
+}
+
+/* Enable the MAC receive function (COMCTRL RXEN + EDSEL). */
+static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw)
+{
+	u32 comctrl = SXE_REG_READ(hw, SXE_COMCTRL);
+
+	comctrl |= SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL;
+	SXE_REG_WRITE(hw, SXE_COMCTRL, comctrl);
+}
+
+/* Start or stop the transmit packet buffer unit; starting also applies
+ * the almost-full threshold configuration bit.
+ */
+void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 dtxcfg = SXE_REG_READ(hw, SXE_DTXCFG);
+
+	if (is_on)
+		dtxcfg |= SXE_DTXCFG_DBUTX_START |
+			  SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG;
+	else
+		dtxcfg &= ~SXE_DTXCFG_DBUTX_START;
+
+	SXE_REG_WRITE(hw, SXE_DTXCFG, dtxcfg);
+}
+
+/* Split the transmit packet buffer evenly across @num_pb buffers and
+ * zero the unused TXPBSIZE entries.
+ */
+void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb)
+{
+	u32 idx;
+	u32 per_buf_size;
+
+	if (!num_pb)
+		num_pb = 1;
+
+	per_buf_size = SXE_TX_PBSIZE_MAX / num_pb;
+	for (idx = 0; idx < num_pb; idx++)
+		SXE_REG_WRITE(hw, SXE_TXPBSIZE(idx), per_buf_size);
+
+	for (; idx < SXE_PKG_BUF_NUM_MAX; idx++)
+		SXE_REG_WRITE(hw, SXE_TXPBSIZE(idx), 0);
+}
+
+/* Toggle LRO of pure-ACK segments: the LRODBU bit is a disable flag,
+ * cleared when ACK coalescing should be on.
+ */
+void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 lrodbu = SXE_REG_READ(hw, SXE_LRODBU);
+
+	if (is_on)
+		lrodbu &= ~SXE_LRODBU_LROACKDIS;
+	else
+		lrodbu |= SXE_LRODBU_LROACKDIS;
+
+	SXE_REG_WRITE(hw, SXE_LRODBU, lrodbu);
+}
+
+/* Enable or disable receive for one VF via its VFRE bit. */
+static void sxe_hw_vf_rx_switch(struct sxe_hw *hw,
+				u32 reg_offset, u32 vf_index, bool is_off)
+{
+	u32 vfre = SXE_REG_READ(hw, SXE_VFRE(reg_offset));
+
+	if (is_off)
+		vfre &= ~BIT(vf_index);
+	else
+		vfre |= BIT(vf_index);
+
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_offset), vfre);
+}
+
+/* Poll FNAVCTRL until the flow-navigator init-done bit is set.
+ * Returns 0 on success, -SXE_ERR_FNAV_REINIT_FAILED on timeout.
+ */
+static s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw)
+{
+	u32 poll;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	for (poll = 0; poll < SXE_FNAV_INIT_DONE_POLL; poll++) {
+		if (SXE_REG_READ(hw, SXE_FNAVCTRL) & SXE_FNAVCTRL_INIT_DONE)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	LOG_DEV_DEBUG("flow navigator poll time exceeded!\n");
+	return -SXE_ERR_FNAV_REINIT_FAILED;
+}
+
+/* Program the flow navigator hash keys and control word, then wait for
+ * init-done. When the mode/report bits (low nibble pair, mask 0x13)
+ * change, the fnav statistics counters are read once to clear them.
+ *
+ * NOTE(review): the return value of sxe_hw_fnav_wait_init_done() is
+ * ignored here — confirm a timeout is acceptable to silently drop.
+ */
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl)
+{
+	u32 fnavctrl_ori;
+	bool is_clear_stat = false;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHKEY, SXE_FNAV_BUCKET_HASH_KEY);
+	SXE_REG_WRITE(hw, SXE_FNAVSKEY, SXE_FNAV_SAMPLE_HASH_KEY);
+
+	fnavctrl_ori = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	if ((fnavctrl_ori & 0x13) != (fnavctrl & 0x13))
+		is_clear_stat = true;
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_fnav_wait_init_done(hw);
+
+	/* Read-to-clear the statistics registers. */
+	if (is_clear_stat) {
+		SXE_REG_READ(hw, SXE_FNAVUSTAT);
+		SXE_REG_READ(hw, SXE_FNAVFSTAT);
+		SXE_REG_READ(hw, SXE_FNAVMATCH);
+		SXE_REG_READ(hw, SXE_FNAVMISS);
+		SXE_REG_READ(hw, SXE_FNAVLEN);
+	}
+
+}
+
+/* Build the FNAVCTRL word for sample or specific (perfect-match) mode
+ * and enable the flow navigator.
+ *
+ * NOTE(review): on an invalid mode this logs and returns 0 without
+ * enabling fnav — the function always returns 0; confirm callers do not
+ * expect an error code on the bad-mode path.
+ */
+static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw,
+					u32 fnavctrl, u32 sxe_fnav_mode)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("fnavctrl=0x%x, sxe_fnav_mode=%u\n", fnavctrl, sxe_fnav_mode);
+
+	if ((sxe_fnav_mode != SXE_FNAV_SAMPLE_MODE) &&
+		(sxe_fnav_mode != SXE_FNAV_SPECIFIC_MODE)) {
+		LOG_ERROR_BDF("mode[%u] a error fnav mode, fnav do not work. please"
+			" use SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n",
+			sxe_fnav_mode);
+		goto l_end;
+	}
+
+	/* Specific mode adds perfect-match filtering and a drop queue. */
+	if (sxe_fnav_mode == SXE_FNAV_SPECIFIC_MODE) {
+		fnavctrl |= SXE_FNAVCTRL_SPECIFIC_MATCH |
+			(SXE_FNAV_DROP_QUEUE << SXE_FNAVCTRL_DROP_Q_SHIFT);
+	}
+
+	/* Flex offset, max compare length and full threshold. */
+	fnavctrl |= (0x6 << SXE_FNAVCTRL_FLEX_SHIFT) |
+			(0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) |
+			(4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT);
+
+	sxe_hw_fnav_enable(hw, fnavctrl);
+
+l_end:
+	return 0;
+}
+
+/* Combine the destination and source port masks into the FNAVTCPM
+ * layout, then bit-reverse the 32-bit word (swap adjacent bits, pairs,
+ * nibbles, then bytes) as the hardware expects the mask bit-reversed.
+ */
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask)
+{
+	u32 mask = ntohs(dst_port_mask);
+
+	mask <<= SXE_FNAVTCPM_DPORTM_SHIFT;
+	mask |= ntohs(src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/* Translate the rule's VM pool mask into FNAVM bits: 0 means "ignore
+ * pool" (set FNAVM_POOL), 0x7F means "match pool exactly"; anything
+ * else is rejected.
+ */
+static s32 sxe_hw_fnav_vm_pool_mask_get(struct sxe_hw *hw,
+						u8 vm_pool, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (vm_pool & SXE_SAMPLE_VM_POOL_MASK) {
+	case 0x0:
+		*fnavm |= SXE_FNAVM_POOL;
+		fallthrough;
+	case 0x7F:
+		break;
+	default:
+		LOG_DEV_ERR("error on vm pool mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+/* Translate the rule's L4 flow-type mask into FNAVM bits: a zero mask
+ * ignores the L4 protocol (set FNAVM_L4P), which is only consistent
+ * when the port masks are zero too; a full mask matches it exactly.
+ */
+static s32 sxe_hw_fnav_flow_type_mask_get(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input_mask,
+					u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (input_mask->ntuple.flow_type & SXE_SAMPLE_L4TYPE_MASK) {
+	case 0x0:
+		*fnavm |= SXE_FNAVM_L4P;
+		if (input_mask->ntuple.dst_port ||
+			input_mask->ntuple.src_port) {
+			LOG_DEV_ERR("error on src/dst port mask\n");
+			ret = -SXE_ERR_CONFIG;
+			goto l_ret;
+		}
+		break;
+	case SXE_SAMPLE_L4TYPE_MASK:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+l_ret:
+	return ret;
+}
+
+/* Translate the rule's VLAN mask into FNAVM bits. The four accepted
+ * masks combine "ignore VLAN id" (FNAVM_VLANID) and "ignore VLAN
+ * priority" (FNAVM_VLANP): 0x0000 ignores both, 0x0FFF matches id only,
+ * 0xE000 matches priority only, 0xEFFF matches both exactly.
+ */
+static s32 sxe_hw_fnav_vlan_mask_get(struct sxe_hw *hw,
+					__be16 vlan_id, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (ntohs(vlan_id) & SXE_SAMPLE_VLAN_MASK) {
+	case 0x0000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0x0FFF:
+		*fnavm |= SXE_FNAVM_VLANP;
+		break;
+	case 0xE000:
+		*fnavm |= SXE_FNAVM_VLANID;
+		fallthrough;
+	case 0xEFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on VLAN mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+/* Translate the rule's flex-bytes mask into FNAVM bits: all-zero means
+ * ignore the flex field (set FNAVM_FLEX), all-ones matches it exactly;
+ * partial masks are rejected.
+ */
+static s32 sxe_hw_fnav_flex_bytes_mask_get(struct sxe_hw *hw,
+					__be16 flex_bytes, u32 *fnavm)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch ((__force u16)flex_bytes & SXE_SAMPLE_FLEX_BYTES_MASK) {
+	case 0x0000:
+		*fnavm |= SXE_FNAVM_FLEX;
+		fallthrough;
+	case 0xFFFF:
+		break;
+	default:
+		LOG_DEV_ERR("error on flexible byte mask\n");
+		ret = -SXE_ERR_CONFIG;
+	}
+
+	return ret;
+}
+
+/* Validate and program the global masks used by all specific (perfect
+ * match) fnav rules: FNAVM flags, TCP/UDP port masks and the IPv4
+ * source/destination masks. Hardware mask registers are written
+ * inverted (0 bit = compare). Returns 0 or -SXE_ERR_CONFIG when any
+ * field mask is unsupported.
+ */
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+					union sxe_fnav_rule_info *input_mask)
+{
+	s32 ret;
+	u32 fnavm = SXE_FNAVM_DIPv6;
+	u32 fnavtcpm;
+	struct sxe_adapter *adapter = hw->adapter;
+
+
+	if (input_mask->ntuple.bkt_hash)
+		LOG_DEV_ERR("bucket hash should always be 0 in mask\n");
+
+	ret = sxe_hw_fnav_vm_pool_mask_get(hw, input_mask->ntuple.vm_pool, &fnavm);
+	if (ret)
+		goto l_err_config;
+
+	ret = sxe_hw_fnav_flow_type_mask_get(hw, input_mask, &fnavm);
+	if (ret)
+		goto l_err_config;
+
+	ret = sxe_hw_fnav_vlan_mask_get(hw, input_mask->ntuple.vlan_id, &fnavm);
+	if (ret)
+		goto l_err_config;
+
+	ret = sxe_hw_fnav_flex_bytes_mask_get(hw, input_mask->ntuple.flex_bytes, &fnavm);
+	if (ret)
+		goto l_err_config;
+
+	LOG_DEBUG_BDF("fnavm = 0x%x\n", fnavm);
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+
+	fnavtcpm = sxe_hw_fnav_port_mask_get(input_mask->ntuple.src_port,
+						input_mask->ntuple.dst_port);
+
+	/* Mask registers take the complement: 0 bit = compare this bit. */
+	LOG_DEBUG_BDF("fnavtcpm = 0x%x\n", fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVTCPM, ~fnavtcpm);
+	SXE_REG_WRITE(hw, SXE_FNAVUDPM, ~fnavtcpm);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVSIP4M,
+			~input_mask->ntuple.src_ip[0]);
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVDIP4M,
+			~input_mask->ntuple.dst_ip[0]);
+
+	return 0;
+
+l_err_config:
+	return -SXE_ERR_CONFIG;
+}
+
+/* Poll FNAVCMD until the hardware clears the command field; the final
+ * register value is returned through @fnavcmd. Returns 0 on completion,
+ * -SXE_ERR_FNAV_CMD_INCOMPLETE after the poll budget is exhausted.
+ */
+static s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw,
+							u32 *fnavcmd)
+{
+	u32 tries = SXE_FNAVCMD_CMD_POLL * 10;
+
+	while (tries--) {
+		*fnavcmd = SXE_REG_READ(hw, SXE_FNAVCMD);
+		if (!(*fnavcmd & SXE_FNAVCMD_CMD_MASK))
+			return 0;
+
+		udelay(10);
+	}
+
+	return -SXE_ERR_FNAV_CMD_INCOMPLETE;
+}
+
+/* Load the rule's IP addresses into the fnav filter registers: the
+ * first three source words into the IPv6 slots, plus the IPv4
+ * source/destination registers.
+ */
+static void sxe_hw_fnav_filter_ip_set(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input)
+{
+	u8 word;
+
+	for (word = 0; word < 3; word++)
+		SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(word),
+			input->ntuple.src_ip[word]);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPSA, input->ntuple.src_ip[0]);
+
+	SXE_REG_WRITE_BE32(hw, SXE_FNAVIPDA, input->ntuple.dst_ip[0]);
+}
+
+/* Pack destination (high half) and source (low half) L4 ports into the
+ * FNAVPORT register.
+ */
+static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input)
+{
+	u32 ports = be16_to_cpu(input->ntuple.dst_port);
+
+	ports <<= SXE_FNAVPORT_DESTINATION_SHIFT;
+	ports |= be16_to_cpu(input->ntuple.src_port);
+	SXE_REG_WRITE(hw, SXE_FNAVPORT, ports);
+}
+
+/* Pack the flex bytes (high half) and VLAN id (low half) into the
+ * FNAVVLAN register.
+ *
+ * NOTE(review): ntohs(SXE_SWAP_16(...)) applies two 16-bit byte swaps
+ * to flex_bytes — on little-endian hosts this is an identity; confirm
+ * the intended byte order rather than simplifying.
+ */
+static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input)
+{
+	u32 fnavvlan;
+
+	fnavvlan = ntohs(SXE_SWAP_16(input->ntuple.flex_bytes));
+	fnavvlan <<= SXE_FNAVVLAN_FLEX_SHIFT;
+	fnavvlan |= ntohs(input->ntuple.vlan_id);
+	SXE_REG_WRITE(hw, SXE_FNAVVLAN, fnavvlan);
+
+}
+
+/* Combine the bucket hash with the software rule id and write FNAVHASH. */
+static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input,
+						u16 soft_id)
+{
+	u32 hash = (__force u32)input->ntuple.bkt_hash |
+		   (soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT);
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+}
+
+/* Issue the add-flow command for a specific rule and wait for the
+ * hardware to finish. Outside of the DPDK build, targeting the drop
+ * queue sets the drop flag. Returns 0 or the poll-timeout error.
+ */
+static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input,
+						u8 queue)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+
+#ifndef SXE_DPDK
+	if (queue == SXE_FNAV_DROP_QUEUE)
+		cmd |= SXE_FNAVCMD_DROP;
+#endif
+
+	cmd |= input->ntuple.flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	cmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+	cmd |= (u32)input->ntuple.vm_pool << SXE_FNAVCMD_VT_POOL_SHIFT;
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, cmd);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &cmd);
+	if (ret)
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+
+	return ret;
+}
+
+/* Program a full specific (perfect match) fnav rule: IPs, ports, VLAN
+ * and bucket hash, then commit it with the add-flow command.
+ */
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input,
+						u16 soft_id, u8 queue)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	sxe_hw_fnav_filter_ip_set(hw, input);
+	sxe_hw_fnav_filter_port_set(hw, input);
+	sxe_hw_fnav_filter_vlan_set(hw, input);
+	sxe_hw_fnav_filter_bkt_hash_set(hw, input, soft_id);
+
+	/* Make sure all fields land before the command is issued. */
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_filter_cmd_set(hw, input, queue);
+	if (ret)
+		LOG_ERROR_BDF("set fnav filter cmd error. ret=%d\n", ret);
+
+	return ret;
+}
+
+/* Remove a specific fnav rule: query the entry by hash + soft id and,
+ * if the hardware reports it valid, issue the remove-flow command.
+ */
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+						union sxe_fnav_rule_info *input,
+						u16 soft_id)
+{
+	s32 ret;
+	u32 cmd;
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 hash = (__force u32)input->ntuple.bkt_hash |
+		   (soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT);
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_QUERY_REM_FILT);
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &cmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator command did not complete!\n");
+		return ret;
+	}
+
+	if (cmd & SXE_FNAVCMD_FILTER_VALID) {
+		SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+		SXE_WRITE_FLUSH(hw);
+		SXE_REG_WRITE(hw, SXE_FNAVCMD,
+				SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	}
+
+	return 0;
+}
+
+/* Add a sample (signature) mode fnav rule with a single 64-bit write:
+ * the command word in the upper half, the hash in the lower half.
+ */
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+						u8 flow_type, u32 hash_value, u8 queue)
+{
+	u64 hashcmd;
+	u32 cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	cmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	cmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+
+	hashcmd = ((u64)cmd << 32) | hash_value;
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hashcmd);
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)hashcmd);
+}
+
+/* Build (without writing) the 64-bit FNAVHASH value for a sample rule:
+ * command word in the upper half, hash in the lower half.
+ */
+static u64 sxe_hw_fnav_sample_rule_hash_get(struct sxe_hw *hw,
+						u8 flow_type, u32 hash_value, u8 queue)
+{
+	u64 hashcmd;
+	u32 cmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE |
+		  SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	cmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT;
+	cmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT;
+
+	hashcmd = ((u64)cmd << 32) | hash_value;
+
+	LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)hashcmd);
+
+	return hashcmd;
+}
+
+/* Validate the sample rule's flow type and, when supported, compute the
+ * combined 64-bit hash/command word into *hash_cmd.
+ * Returns 0 or -SXE_ERR_CONFIG for an unsupported flow type.
+ */
+static s32 sxe_hw_fnav_sample_hash_cmd_get(struct sxe_hw *hw,
+							u8 flow_type,
+							u32 hash_value,
+							u8 queue, u64 *hash_cmd)
+{
+	u8 pkg_type = flow_type & SXE_SAMPLE_FLOW_TYPE_MASK;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	switch (pkg_type) {
+	case SXE_SAMPLE_FLOW_TYPE_TCPV4:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV4:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV4:
+	case SXE_SAMPLE_FLOW_TYPE_TCPV6:
+	case SXE_SAMPLE_FLOW_TYPE_UDPV6:
+	case SXE_SAMPLE_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		LOG_DEV_ERR("error on flow type input\n");
+		return -SXE_ERR_CONFIG;
+	}
+
+	*hash_cmd = sxe_hw_fnav_sample_rule_hash_get(hw, pkg_type, hash_value, queue);
+
+	return 0;
+}
+
+/* Delete one sample-mode rule identified by @hash via the remove-flow
+ * command, waiting for the hardware to finish.
+ */
+static s32 sxe_hw_fnav_single_sample_rule_del(struct sxe_hw *hw,
+							u32 hash)
+{
+	s32 ret;
+	u32 fnavcmd;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, hash);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_REMOVE_FLOW);
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator previous command did not complete,"
+			"aborting table re-initialization.\n");
+	}
+
+	return ret;
+}
+
+/* Re-initialize the sample-mode rule table: wait for any in-flight
+ * command, reset the free counter, pulse the clear-hash-table bit,
+ * zero FNAVHASH, restore FNAVCTRL (with INIT_DONE cleared so the
+ * hardware re-runs init) and wait for init-done. Finishes by reading
+ * the read-to-clear statistics registers. Register order and the
+ * interleaved flushes are required by hardware — do not reorder.
+ * Returns 0, or the poll-timeout error from either wait.
+ */
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw)
+{
+	u32 fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	u32 fnavcmd;
+	s32 ret;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	fnavctrl &= ~SXE_FNAVCTRL_INIT_DONE;
+
+	ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd);
+	if (ret) {
+		LOG_DEV_ERR("flow navigator previous command did not complete,"
+			"aborting table re-initialization.\n");
+		goto l_ret;
+	}
+
+	SXE_REG_WRITE(hw, SXE_FNAVFREE, 0);
+	SXE_WRITE_FLUSH(hw);
+
+	/* Pulse CLEARHT: set then clear to wipe the hash table. */
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) |
+			SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+	SXE_REG_WRITE(hw, SXE_FNAVCMD,
+			(SXE_REG_READ(hw, SXE_FNAVCMD) &
+			~SXE_FNAVCMD_CLEARHT));
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVHASH, 0x00);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret) {
+		LOG_ERROR_BDF("flow navigator simple poll time exceeded!\n");
+		goto l_ret;
+	}
+
+	/* Read-to-clear the statistics registers. */
+	SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	SXE_REG_READ(hw, SXE_FNAVMATCH);
+	SXE_REG_READ(hw, SXE_FNAVMISS);
+	SXE_REG_READ(hw, SXE_FNAVLEN);
+
+l_ret:
+	return ret;
+}
+
+/* Clear the fnav statistics counters; the registers are read-to-clear,
+ * so a plain read of each is sufficient.
+ */
+static void sxe_hw_fnav_sample_stats_reinit(struct sxe_hw *hw)
+{
+	(void)SXE_REG_READ(hw, SXE_FNAVUSTAT);
+	(void)SXE_REG_READ(hw, SXE_FNAVFSTAT);
+	(void)SXE_REG_READ(hw, SXE_FNAVMATCH);
+	(void)SXE_REG_READ(hw, SXE_FNAVMISS);
+	(void)SXE_REG_READ(hw, SXE_FNAVLEN);
+}
+
+/* Apply a PTP frequency adjustment: low word zero, high word carries
+ * the adjustment value.
+ */
+static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq)
+{
+	SXE_REG_WRITE(hw, SXE_TIMADJL, 0);
+	SXE_REG_WRITE(hw, SXE_TIMADJH, adj_freq);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Read the PTP system time (low then mid register) and convert the
+ * pair to nanoseconds.
+ */
+u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 time_low = SXE_REG_READ(hw, SXE_SYSTIML);
+	u32 time_mid = SXE_REG_READ(hw, SXE_SYSTIMM);
+	u64 ns = SXE_TIME_TO_NS(time_low, time_mid);
+
+	LOG_DEBUG_BDF("get ptp hw systime systiml=%u, systimm=%u, ns=%"SXE_PRIU64"\n",
+			time_low, time_mid, ns);
+	return ns;
+}
+
+/* Reset the PTP system clock: zero all three time registers. */
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_SYSTIML, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMM, 0);
+	SXE_REG_WRITE(hw, SXE_SYSTIMH, 0);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Initialize PTP timestamping: enable v2 timestamps for all packet
+ * types and L4 unicast, clear one-step and checksum-update modes, and
+ * program the clock increment.
+ */
+void sxe_hw_ptp_init(struct sxe_hw *hw)
+{
+	u32 tsctrl = SXE_REG_READ(hw, SXE_TSCTRL);
+
+	tsctrl &= ~(SXE_TSCTRL_ONESTEP | SXE_TSCTRL_CSEN);
+	tsctrl |= SXE_TSCTRL_TSEN |
+		  SXE_TSCTRL_VER_2 |
+		  SXE_TSCTRL_PTYP_ALL |
+		  SXE_TSCTRL_L4_UNICAST;
+	SXE_REG_WRITE(hw, SXE_TSCTRL, tsctrl);
+
+	SXE_REG_WRITE(hw, SXE_TIMINC,
+			SXE_TIMINC_SET(SXE_INCPD, SXE_IV_NS, SXE_IV_SNS));
+}
+
+/* Discard a latched Rx timestamp; reading RXSTMPH presumably releases
+ * the latch for the next packet — confirm against the datasheet.
+ */
+void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw)
+{
+	SXE_REG_READ(hw, SXE_RXSTMPH);
+}
+
+/* Read the latched Tx PTP timestamp.
+ *
+ * The latch supplies 24 bits of seconds plus 8 bits spilled into the
+ * nanosecond word; the missing top byte of seconds is borrowed from the
+ * live SYSTIMM register. When the latched 24-bit seconds are far ahead of
+ * SYSTIMM's low 24 bits (unsigned wrap check), SYSTIMM's top byte rolled
+ * over after the packet was stamped, so that byte must be decremented.
+ *
+ * Fix: systimm_8bit holds the top byte in bits 24-31, so the decrement
+ * must subtract 0x01000000; subtracting 1 corrupted the low 24 bits that
+ * are later OR-ed with sec_24bit.
+ *
+ * @ts_sec: out, assembled 32-bit seconds value
+ * @ts_ns:  out, assembled 32-bit nanoseconds value
+ */
+void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
+				u32 *ts_sec, u32 *ts_ns)
+{
+	u32 reg_sec;
+	u32 reg_ns;
+	u32 sec_8bit;
+	u32 sec_24bit;
+	u32 systimm;
+	u32 systimm_8bit;
+	u32 systimm_24bit;
+
+	/* select and read the two halves of the Tx timestamp latch */
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC0);
+	reg_ns = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC1);
+	reg_sec = SXE_REG_READ(hw, SXE_TXSTMP_VAL);
+	systimm = SXE_REG_READ(hw, SXE_SYSTIMM);
+
+
+	sec_8bit = reg_sec & 0x000000FF;
+	sec_24bit = (reg_sec >> 8) & 0x00FFFFFF;
+
+	systimm_24bit = systimm & 0x00FFFFFF;
+	systimm_8bit = systimm & 0xFF000000;
+
+	*ts_ns = (sec_8bit << 24) | ((reg_ns & 0xFFFFFF00) >> 8);
+
+	if (unlikely((sec_24bit - systimm_24bit) >= 0x00FFFFF0)) {
+		/* SYSTIMM rolled over after the latch: borrow one from
+		 * the top byte (bits 24-31), guarding against underflow.
+		 */
+		if (systimm_8bit >= 0x01000000)
+			systimm_8bit -= 0x01000000;
+	}
+
+	*ts_sec = systimm_8bit | sec_24bit;
+}
+
+/* Read the latched Rx PTP timestamp (RXSTMPL/RXSTMPH) and convert the
+ * pair to nanoseconds via SXE_TIME_TO_NS. Returns the timestamp in ns.
+ */
+u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 rxtsl;
+	u32 rxtsh;
+	u64 ns;
+
+	rxtsl = SXE_REG_READ(hw, SXE_RXSTMPL);
+	rxtsh = SXE_REG_READ(hw, SXE_RXSTMPH);
+	ns = SXE_TIME_TO_NS(rxtsl, rxtsh);
+
+	LOG_DEBUG_BDF("ptp get rx ptp timestamp low=%u, high=%u, ns=%"SXE_PRIU64"\n",
+		      rxtsl, rxtsh, ns);
+	return ns;
+}
+
+/* Return true when TSYNCRXCTL reports a latched Rx timestamp (RXTT set). */
+bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw)
+{
+	u32 ctl_val = SXE_REG_READ(hw, SXE_TSYNCRXCTL);
+
+	return (ctl_val & SXE_TSYNCRXCTL_RXTT) != 0;
+}
+
+/* Configure the PTP timestamping mode.
+ * @is_l2: enable the 1588 EtherType filter for L2 PTP frames
+ * @tsctl: extra TSCTRL bits to OR in (0 leaves TSCTRL untouched)
+ * @tses:  value written verbatim to the timestamp-event-select register
+ */
+void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
+					bool is_l2, u32 tsctl, u32 tses)
+{
+	u32 regval;
+
+	if (is_l2) {
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588),
+			      (SXE_ETQF_FILTER_EN |
+			       SXE_ETQF_1588 |
+			       ETH_P_1588));
+	} else {
+		SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), 0);
+	}
+
+	if (tsctl) {
+		regval = SXE_REG_READ(hw, SXE_TSCTRL);
+		regval |= tsctl;
+		SXE_REG_WRITE(hw, SXE_TSCTRL, regval);
+	}
+
+	SXE_REG_WRITE(hw, SXE_TSES, tses);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Turn on both Tx and Rx PTP timestamping (TSYNCTXCTL.TEN and
+ * TSYNCRXCTL.REN), then flush the writes.
+ */
+void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL,
+		      (SXE_REG_READ(hw, SXE_TSYNCTXCTL) |
+		       SXE_TSYNCTXCTL_TEN));
+
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL,
+		      (SXE_REG_READ(hw, SXE_TSYNCRXCTL) |
+		       SXE_TSYNCRXCTL_REN));
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Program RQTC with the bit width of @rss (number of bits required to
+ * represent it) scaled by the 8-TC MSB factor.
+ */
+static void sxe_hw_dcb_tc_rss_configure(struct sxe_hw *hw, u16 rss)
+{
+	u32 bit_width;
+
+	for (bit_width = 0; rss != 0; rss >>= 1)
+		bit_width++;
+
+	SXE_REG_WRITE(hw, SXE_RQTC, bit_width * SXE_8_TC_MSB);
+}
+
+/* Clear TXDCTL.ENABLE on ring @reg_idx, then poll with an escalating
+ * backoff (starting at timeout/100, doubling each loop) until the
+ * hardware confirms the ring is disabled or the poll budget runs out.
+ */
+static void sxe_hw_tx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				unsigned long timeout)
+{
+	unsigned long wait_delay, delay_interval;
+	int wait_loop;
+	u32 txdctl;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	txdctl &= ~SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+	delay_interval = timeout / 100;
+
+	wait_loop = SXE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+
+		if (!(txdctl & SXE_TXDCTL_ENABLE))
+			return;
+	}
+
+	LOG_MSG_ERR(drv, "register TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Clear RXDCTL.ENABLE on ring @reg_idx, then poll with an escalating
+ * backoff until the hardware confirms the ring is disabled or the poll
+ * budget is exhausted. Mirror of sxe_hw_tx_ring_disable for Rx.
+ */
+static void sxe_hw_rx_ring_disable(struct sxe_hw *hw, u8 reg_idx,
+				unsigned long timeout)
+{
+	unsigned long wait_delay, delay_interval;
+	int wait_loop;
+	u32 rxdctl;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	rxdctl &= ~SXE_RXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+	delay_interval = timeout / 100;
+
+	wait_loop = SXE_MAX_RX_DESC_POLL;
+	wait_delay = delay_interval;
+
+	while (wait_loop--) {
+		usleep_range(wait_delay, wait_delay + 10);
+		wait_delay += delay_interval * 2;
+		rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+		if (!(rxdctl & SXE_RXDCTL_ENABLE))
+			return;
+	}
+
+	LOG_MSG_ERR(drv, "register RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+/* Return the raw Tx packet-buffer flow-control status register. */
+static u32 sxe_hw_tx_dbu_fc_status_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_TXPBFCS);
+}
+
+/* Write a 64-bit sample-mode hash into the flow-navigator hash register. */
+static void sxe_hw_fnav_sample_hash_set(struct sxe_hw *hw, u64 hash)
+{
+	SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash);
+}
+
+/* Data-buffer-unit (DBU) hardware-ops table: Rx buffers/RSS/LRO, flow
+ * navigator, Tx buffers, PTP and DCB helpers wired into the HAL.
+ */
+static const struct sxe_dbu_operations sxe_dbu_ops = {
+	.rx_pkt_buf_size_configure = sxe_hw_rx_pkt_buf_size_configure,
+	.rx_pkt_buf_switch = sxe_hw_rx_pkt_buf_switch,
+	.rx_multi_ring_configure = sxe_hw_rx_multi_ring_configure,
+	.rss_key_set_all = sxe_hw_rss_key_set_all,
+	.rss_redir_tbl_set_all = sxe_hw_rss_redir_tbl_set_all,
+	.rx_cap_switch_on = sxe_hw_rx_cap_switch_on,
+	.rx_cap_switch_off = sxe_hw_rx_cap_switch_off,
+	.rss_hash_pkt_type_set = sxe_hw_rss_hash_pkt_type_set,
+	.rss_hash_pkt_type_update = sxe_hw_rss_hash_pkt_type_update,
+	.rss_rings_used_set = sxe_hw_rss_rings_used_set,
+	.lro_ack_switch = sxe_hw_rx_lro_ack_switch,
+
+	.fnav_mode_init = sxe_hw_fnav_mode_init,
+	.fnav_specific_rule_mask_set = sxe_hw_fnav_specific_rule_mask_set,
+	.fnav_specific_rule_add = sxe_hw_fnav_specific_rule_add,
+	.fnav_specific_rule_del = sxe_hw_fnav_specific_rule_del,
+	.fnav_sample_hash_cmd_get = sxe_hw_fnav_sample_hash_cmd_get,
+	.fnav_sample_stats_reinit = sxe_hw_fnav_sample_stats_reinit,
+	.fnav_sample_hash_set = sxe_hw_fnav_sample_hash_set,
+	.fnav_single_sample_rule_del = sxe_hw_fnav_single_sample_rule_del,
+
+	.tx_pkt_buf_switch = sxe_hw_tx_pkt_buf_switch,
+	.tx_pkt_buf_size_configure = sxe_hw_tx_pkt_buf_size_configure,
+
+	.ptp_init = sxe_hw_ptp_init,
+	.ptp_freq_adjust = sxe_hw_ptp_freq_adjust,
+	.ptp_systime_init = sxe_hw_ptp_systime_init,
+	.ptp_systime_get = sxe_hw_ptp_systime_get,
+	.ptp_tx_timestamp_get = sxe_hw_ptp_tx_timestamp_get,
+	.ptp_timestamp_mode_set = sxe_hw_ptp_timestamp_mode_set,
+	.ptp_timestamp_enable = sxe_hw_ptp_timestamp_enable,
+	.ptp_rx_timestamp_clear = sxe_hw_ptp_rx_timestamp_clear,
+	.ptp_rx_timestamp_get = sxe_hw_ptp_rx_timestamp_get,
+	.ptp_is_rx_timestamp_valid = sxe_hw_ptp_is_rx_timestamp_valid,
+
+	.dcb_tc_rss_configure = sxe_hw_dcb_tc_rss_configure,
+	.vf_rx_switch = sxe_hw_vf_rx_switch,
+	.rx_pkt_buf_size_get = sxe_hw_rx_pkt_buf_size_get,
+	.rx_func_switch_on = sxe_hw_rx_func_switch_on,
+
+	.tx_ring_disable = sxe_hw_tx_ring_disable,
+	.rx_ring_disable = sxe_hw_rx_ring_disable,
+
+	.tx_dbu_fc_status_get = sxe_hw_tx_dbu_fc_status_get,
+};
+
+
+/* Initialize Rx DMA control: clear the LRO first-size field in RDRXCTL. */
+void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw)
+{
+	u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	rx_dma_ctrl &= ~SXE_RDRXCTL_LROFRSTSIZE;
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
+}
+
+/* Set the LRO ACK-coalescing bit in the Rx DMA control register. */
+void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw)
+{
+	u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL);
+
+	rx_dma_ctrl |= SXE_RDRXCTL_LROACKC;
+	SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl);
+}
+
+/* Program RXDCTL descriptor thresholds for ring @reg_idx: prefetch count
+ * 0x40, descriptor-FIFO almost-empty threshold 0x2, plus the low bits
+ * 0x10 (field meaning not documented here — see register manual).
+ */
+void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl;
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	rxdctl |= 0x40 << SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT;
+	rxdctl |= 0x2 << SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT;
+	rxdctl |= 0x10;
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+}
+
+/* Enable or disable Rx ring @reg_idx and poll (up to SXE_RING_WAIT_LOOP
+ * iterations, 1-2 ms apart) until RXDCTL.ENABLE reflects the request;
+ * logs an error if the hardware never acknowledges.
+ */
+void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXE_RING_WAIT_LOOP;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+		} while (--wait_loop && !(rxdctl & SXE_RXDCTL_ENABLE));
+	} else {
+		rxdctl &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+		} while (--wait_loop && (rxdctl & SXE_RXDCTL_ENABLE));
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+	if (!wait_loop) {
+		LOG_MSG_ERR(drv, "rx ring %u switch %u failed within "
+			    "the polling period\n", reg_idx, is_on);
+	}
+
+}
+
+/* Set or clear RXDCTL.ENABLE on ring @reg_idx without waiting for the
+ * hardware to acknowledge; the write is only flushed.
+ */
+void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+
+	if (is_on)
+		ctrl |= SXE_RXDCTL_ENABLE;
+	else
+		ctrl &= ~SXE_RXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), ctrl);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Write the Rx head (RDH) and tail (RDT) pointers for queue @reg_idx. */
+void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
+					u8 reg_idx, u32 rdh_value,
+					u32 rdt_value)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), rdh_value);
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), rdt_value);
+}
+
+/* Zero the Rx head pointer of ring @reg_idx. */
+static void sxe_hw_rx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDH(reg_idx), 0);
+
+}
+
+/* Zero the Rx tail pointer of ring @reg_idx. */
+static void sxe_hw_rx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDT(reg_idx), 0);
+
+}
+
+/* Program the Rx descriptor ring of queue @reg_idx: DMA base (split into
+ * low/high 32 bits) and length, then reset head and tail to zero.
+ */
+void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
+				u32 desc_mem_len, u64 desc_dma_addr,
+				u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_RDBAL(reg_idx),
+		      (desc_dma_addr & DMA_BIT_MASK(32)));
+	SXE_REG_WRITE(hw, SXE_RDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXE_REG_WRITE(hw, SXE_RDLEN(reg_idx), desc_mem_len);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_rx_ring_head_init(hw, reg_idx);
+	sxe_hw_rx_ring_tail_init(hw, reg_idx);
+
+}
+
+/* Program SRRCTL for queue @reg_idx: header buffer length (shifted into
+ * the BSIZEHDR field) and packet buffer length (scaled down into the
+ * BSIZEPKT field).
+ */
+void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
+				u32 header_buf_len, u32 pkg_buf_len)
+{
+	u32 srrctl;
+
+	srrctl = ((header_buf_len << SXE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+		  SXE_SRRCTL_BSIZEHDR_MASK);
+	srrctl |= ((pkg_buf_len >> SXE_SRRCTL_BSIZEPKT_SHIFT) &
+		   SXE_SRRCTL_BSIZEPKT_MASK);
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(reg_idx), srrctl);
+
+}
+
+/* Enable LRO on queue @reg_idx and OR in @max_desc (expected to already
+ * be an encoded LROCTL max-descriptor value).
+ */
+void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
+				u8 reg_idx, u32 max_desc)
+{
+	u32 lroctrl;
+	lroctrl = SXE_REG_READ(hw, SXE_LROCTL(reg_idx));
+	lroctrl |= SXE_LROCTL_LROEN;
+	lroctrl |= max_desc;
+	SXE_REG_WRITE(hw, SXE_LROCTL(reg_idx), lroctrl);
+
+}
+
+/* Return the raw RXDCTL value of ring @reg_idx. */
+static u32 sxe_hw_rx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_RXDCTL(reg_idx));
+}
+
+/* Enable or disable the DCB Tx descriptor-plane arbiter via RTTDCS;
+ * enabling also clears the packet-buffer FSM bit.
+ */
+static void sxe_hw_dcb_arbiter_set(struct sxe_hw *hw, bool is_enable)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RTTDCS);
+
+	if (is_enable)
+		ctrl &= ~(SXE_RTTDCS_ARBDIS | SXE_RTTDCS_BPBFSM);
+	else
+		ctrl |= SXE_RTTDCS_ARBDIS;
+
+	SXE_REG_WRITE(hw, SXE_RTTDCS, ctrl);
+}
+
+
+/* Select the MTQC Tx queueing layout from the TC count, per-pool queue
+ * mask and SR-IOV state, with the arbiter disabled around the write
+ * (required before changing MTQC).
+ */
+static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs,
+			u16 pool_mask, bool sriov_enable, u16 max_txq)
+{
+	u32 mtqc;
+
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (true == sriov_enable) {
+		mtqc = SXE_MTQC_VT_ENA;
+		if (tcs > SXE_DCB_4_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+		else if (tcs > SXE_DCB_1_TC)
+			mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+		else if (pool_mask == SXE_4Q_PER_POOL_MASK)
+			mtqc |= SXE_MTQC_32VF;
+		else
+			mtqc |= SXE_MTQC_64VF;
+	} else {
+		if (tcs > SXE_DCB_4_TC) {
+			mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+		} else if (tcs > SXE_DCB_1_TC) {
+			mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+		} else {
+			/* >63 Tx queues needs the 4TC/4TQ layout even
+			 * without DCB; otherwise one packet buffer, 64 queues.
+			 */
+			if (max_txq > 63)
+				mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+			else
+				mtqc = SXE_MTQC_64Q_1PB;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	sxe_hw_dcb_arbiter_set(hw, true);
+
+}
+
+/* Zero the Tx head pointer of ring @reg_idx. */
+void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDH(reg_idx), 0);
+
+}
+
+/* Zero the Tx tail pointer of ring @reg_idx. */
+void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TDT(reg_idx), 0);
+
+}
+
+/* Program the Tx descriptor ring of queue @reg_idx: first disable the
+ * queue by zeroing TXDCTL (flushed), then write the DMA base and length
+ * and reset head/tail to zero.
+ */
+void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
+					u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), 0);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), (desc_dma_addr & DMA_BIT_MASK(32)));
+	SXE_REG_WRITE(hw, SXE_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXE_REG_WRITE(hw, SXE_TDLEN(reg_idx), desc_mem_len);
+	sxe_hw_tx_ring_head_init(hw, reg_idx);
+	sxe_hw_tx_ring_tail_init(hw, reg_idx);
+
+}
+
+/* Program TXDCTL thresholds for ring @reg_idx.
+ * @wb_thresh:      write-back threshold (WTHRESH field)
+ * @host_thresh:    host threshold (HTHRESH field)
+ * @prefech_thresh: prefetch threshold (low bits, PTHRESH)
+ */
+void sxe_hw_tx_desc_thresh_set(
+				struct sxe_hw *hw,
+				u8 reg_idx,
+				u32 wb_thresh,
+				u32 host_thresh,
+				u32 prefech_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXE_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXE_TXDCTL_HTHRESH_SHIFT) | prefech_thresh;
+
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+}
+
+/* Clear the ENABLE bit on all Tx and Rx rings up to @ring_max, flush,
+ * and wait 1-2 ms for in-flight DMA to settle. No per-ring polling.
+ */
+void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max)
+{
+	u32 i, value;
+
+	for (i = 0; i < ring_max; i++) {
+		value = SXE_REG_READ(hw, SXE_TXDCTL(i));
+		value &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(i), value);
+
+		value = SXE_REG_READ(hw, SXE_RXDCTL(i));
+		value &= ~SXE_RXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_RXDCTL(i), value);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+	usleep_range(1000, 2000);
+
+}
+
+/* Enable or disable Tx ring @reg_idx and poll (up to SXE_RING_WAIT_LOOP
+ * iterations, 1-2 ms apart) until TXDCTL.ENABLE reflects the request;
+ * logs an error if the hardware never acknowledges.
+ */
+void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXE_RING_WAIT_LOOP;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXE_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXE_TXDCTL_ENABLE;
+		SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXE_TXDCTL_ENABLE));
+	}
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			    "the polling period\n", reg_idx, is_on);
+	}
+
+}
+
+/* Set or clear TXDCTL.ENABLE on ring @reg_idx without waiting for the
+ * hardware to acknowledge the change.
+ */
+void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+
+	if (is_on)
+		ctrl |= SXE_TXDCTL_ENABLE;
+	else
+		ctrl &= ~SXE_TXDCTL_ENABLE;
+
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), ctrl);
+}
+
+/* Set the Tx packet-buffer almost-full thresholds: the total buffer is
+ * split evenly across @num_pb buffers (at least 1), the threshold is the
+ * per-buffer size in KB minus the max packet size (DCB vs non-DCB), and
+ * any unused buffers get threshold 0.
+ */
+void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
+					u8 num_pb, bool dcb_enable)
+{
+	u32 i, tx_pkt_size, tx_pb_thresh;
+
+	if (!num_pb)
+		num_pb = 1;
+
+	tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb;
+	if (true == dcb_enable)
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_TX_PKT_SIZE_MAX;
+	else
+		tx_pb_thresh = (tx_pkt_size / 1024) - SXE_NODCB_TX_PKT_SIZE_MAX;
+
+	for (i = 0; i < num_pb; i++)
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), tx_pb_thresh);
+
+	for (; i < SXE_PKG_BUF_NUM_MAX; i++)
+		SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), 0);
+
+}
+
+/* Globally enable Tx DMA by setting DMATXCTL.TE. */
+void sxe_hw_tx_enable(struct sxe_hw *hw)
+{
+	u32 ctl;
+
+	ctl = SXE_REG_READ(hw, SXE_DMATXCTL);
+	ctl |= SXE_DMATXCTL_TE;
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, ctl);
+
+}
+
+/* Return the raw TXDCTL value of ring @reg_idx. */
+static u32 sxe_hw_tx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+}
+
+/* Clear the write-back threshold field (bits 16-22) of TXDCTL on ring
+ * @reg_idx: the queue is disabled first (flushed), the field cleared,
+ * then the queue re-enabled in the same write.
+ */
+static void sxe_hw_tx_desc_wb_thresh_clear(struct sxe_hw *hw, u8 reg_idx)
+{
+	u32 reg_data;
+
+	reg_data = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+	reg_data &= ~SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+	SXE_WRITE_FLUSH(hw);
+	reg_data &= ~(0x7f<<16);
+	reg_data |= SXE_TXDCTL_ENABLE;
+	SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data);
+
+}
+
+/* Toggle VLAN tag stripping (RXDCTL.VME) on Rx queue @reg_index. */
+void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
+					u16 reg_index, bool is_enable)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_RXDCTL(reg_index));
+
+	if (is_enable)
+		ctrl |= SXE_RXDCTL_VME;
+	else
+		ctrl &= ~SXE_RXDCTL_VME;
+
+	SXE_REG_WRITE(hw, SXE_RXDCTL(reg_index), ctrl);
+}
+
+/* Program the default VLAN insertion for pool @vf: VLAN id, priority
+ * (qos) and the "always insert" default mode bit, written to VMVIR.
+ */
+static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw,
+					u16 vid, u16 qos, u32 vf)
+{
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | SXE_VMVIR_VLANA_DEFAULT;
+
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), vmvir);
+}
+
+/* Clear the default VLAN insertion (VMVIR) for pool @vf. */
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf)
+{
+	SXE_REG_WRITE(hw, SXE_VMVIR(vf), 0);
+}
+
+/* Return the raw VMVIR (VLAN insertion) register for pool @vf. */
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf)
+{
+	return SXE_REG_READ(hw, SXE_VMVIR(vf));
+}
+
+/* Read the current Tx head and tail pointers of ring @idx. */
+void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXE_REG_READ(hw, SXE_TDH(idx));
+	*tail = SXE_REG_READ(hw, SXE_TDT(idx));
+
+}
+
+/* Configure DCB Rx packet-plane bandwidth allocation: disable the Rx
+ * arbiter, program the UP->TC map and per-TC refill/max/bwg/priority
+ * credits (RTRPT4C), then re-enable the arbiter.
+ * Arrays refill/max/bwg_id/prio_type are indexed by TC
+ * (MAX_TRAFFIC_CLASS entries); prio_tc by user priority (@max_priority).
+ */
+void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
+					  u16 *refill,
+					  u16 *max,
+					  u8 *bwg_id,
+					  u8 *prio_type,
+					  u8 *prio_tc,
+					  u8 max_priority)
+{
+	u32 reg;
+	u32 credit_refill;
+	u32 credit_max;
+	u8  i;
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++)
+		reg |= (prio_tc[i] << (i * SXE_RTRUP2TC_UP_SHIFT));
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max    = max[i];
+		reg = credit_refill | (credit_max << SXE_RTRPT4C_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << SXE_RTRPT4C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_LINK)
+			reg |= SXE_RTRPT4C_LSP;
+
+		SXE_REG_WRITE(hw, SXE_RTRPT4C(i), reg);
+	}
+
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+}
+
+/* Configure DCB Tx descriptor-plane bandwidth allocation: zero all 128
+ * per-queue rate selectors, program per-TC credits (RTTDT2C) with
+ * group/link strict-priority bits, then enable the descriptor arbiter.
+ */
+void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type)
+{
+	u32 reg, max_credits;
+	u8  i;
+
+	for (i = 0; i < 128; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTDT1C, 0);
+	}
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << SXE_RTTDT2C_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << SXE_RTTDT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP)
+			reg |= SXE_RTTDT2C_GSP;
+
+		if (prio_type[i] == PRIO_LINK)
+			reg |= SXE_RTTDT2C_LSP;
+
+		SXE_REG_WRITE(hw, SXE_RTTDT2C(i), reg);
+	}
+
+	reg = SXE_RTTDCS_TDPAC | SXE_RTTDCS_TDRM;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+}
+
+/* Configure DCB Tx data-plane bandwidth allocation: disable the packet
+ * arbiter, program the Tx UP->TC map and per-TC credits (RTTPT2C) with
+ * group/link strict-priority bits, then re-enable the arbiter in DCB mode.
+ */
+void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type,
+					   u8 *prio_tc,
+					   u8 max_priority)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) |
+	      SXE_RTTPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+	reg = 0;
+	for (i = 0; i < max_priority; i++)
+		reg |= (prio_tc[i] << (i * SXE_RTTUP2TC_UP_SHIFT));
+
+	SXE_REG_WRITE(hw, SXE_RTTUP2TC, reg);
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << SXE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << SXE_RTTPT2C_BWG_SHIFT;
+
+		if (prio_type[i] == PRIO_GROUP)
+			reg |= SXE_RTTPT2C_GSP;
+
+		if (prio_type[i] == PRIO_LINK)
+			reg |= SXE_RTTPT2C_LSP;
+
+		SXE_REG_WRITE(hw, SXE_RTTPT2C(i), reg);
+	}
+
+	reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM |
+	      (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT);
+	SXE_REG_WRITE(hw, SXE_RTTPCS, reg);
+
+}
+
+/* Configure priority flow control (PFC).
+ * @pfc_en:   bitmap of user priorities with PFC enabled
+ * @prio_tc:  user-priority -> TC map, @max_priority entries
+ *
+ * Sets PFC mode in FLCTRL (per-priority enable/XON bitmaps at bits 16/24),
+ * selects PFC opcode in PFCTOP, programs per-TC high/low watermarks for
+ * TCs that have at least one PFC-enabled priority (others get a derived
+ * high threshold and no XON low threshold), zeroes unused TCs, and writes
+ * the pause-time and refresh registers.
+ */
+void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
+					  u8 pfc_en, u8 *prio_tc,
+					  u8 max_priority)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+	u32 reg_val;
+
+	reg_val = SXE_REG_READ(hw, SXE_FLCTRL);
+
+	reg_val &= ~SXE_FCTRL_TFCE_MASK;
+	reg_val |= SXE_FCTRL_TFCE_PFC_EN;
+
+	reg_val |= SXE_FCTRL_TFCE_DPF_EN;
+
+	reg_val &= ~(SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK);
+	reg_val |= (pfc_en << 16) & SXE_FCTRL_TFCE_FCEN_MASK;
+	reg_val |= (pfc_en << 24) & SXE_FCTRL_TFCE_XONE_MASK;
+
+	reg_val &= ~SXE_FCTRL_RFCE_MASK;
+	reg_val |= SXE_FCTRL_RFCE_PFC_EN;
+	SXE_REG_WRITE(hw, SXE_FLCTRL, reg_val);
+
+	reg_val = SXE_REG_READ(hw, SXE_PFCTOP);
+	reg_val &= ~SXE_PFCTOP_FCOP_MASK;
+	reg_val |= SXE_PFCTOP_FCT;
+	reg_val |= SXE_PFCTOP_FCOP_PFC;
+	SXE_REG_WRITE(hw, SXE_PFCTOP, reg_val);
+
+	/* highest TC actually referenced by the priority map */
+	for (i = 0; i < max_priority; i++) {
+		if (prio_tc[i] > max_tc)
+			max_tc = prio_tc[i];
+	}
+
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < max_priority; j++) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl);
+		} else {
+			/* 24576 appears to be a reserved headroom in bytes
+			 * carved out of the Rx packet buffer — confirm
+			 * against the datasheet.
+			 */
+			reg = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1;
+			SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		}
+
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), reg);
+	}
+
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), 0);
+	}
+
+	/* replicate the 16-bit pause time into both halves of FCTTV */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		SXE_REG_WRITE(hw, SXE_FCTTV(i), reg);
+
+	SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2);
+
+}
+
+/* Map queues to statistics counters for 8-TC mode without VMDq:
+ * Rx (RQSMR) maps 4 queues per counter group; Tx (TQSM) uses the fixed
+ * 8-TC queue split (8/8/4/4/2/2/2/2 queues per TC).
+ */
+static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u32 reg;
+	u8  i;
+
+	for (i = 0; i < 32; i++) {
+		reg = 0x01010101 * (i / 4);
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (i < 8)
+			reg = 0x00000000;
+		else if (i < 16)
+			reg = 0x01010101;
+		else if (i < 20)
+			reg = 0x02020202;
+		else if (i < 24)
+			reg = 0x03030303;
+		else if (i < 26)
+			reg = 0x04040404;
+		else if (i < 28)
+			reg = 0x05050505;
+		else if (i < 30)
+			reg = 0x06060606;
+		else
+			reg = 0x07070707;
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
+	}
+
+}
+
+/* Clamp any user-priority -> TC entry in RTRUP2TC that points at a TC
+ * greater than @tc back to zero, writing the register only if changed.
+ *
+ * Fix: the original cleared "0x7 << SXE_RTRUP2TC_UP_MASK" — the mask
+ * macro was used as a shift amount and the loop index was ignored, so
+ * the same (wrong) bits were cleared for every entry. Clear entry @i's
+ * field instead, and mask the extracted value before comparing, matching
+ * the field layout used by sxe_hw_dcb_rx_up_tc_map_get().
+ */
+static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc)
+{
+	u8 i;
+	u32 reg, rsave;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = (reg >> (i * SXE_RTRUP2TC_UP_SHIFT)) &
+			   SXE_RTRUP2TC_UP_MASK;
+
+		if (up2tc > tc)
+			reg &= ~((u32)SXE_RTRUP2TC_UP_MASK <<
+				 (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+	if (reg != rsave)
+		SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg);
+
+}
+
+/* Enable or disable VT pool-to-pool loopback via PFDTXGSWC. */
+void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
+						bool is_enable)
+{
+	u32 ctrl = is_enable ? SXE_PFDTXGSWC_VT_LBEN : 0;
+
+	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, ctrl);
+}
+
+/* Enable drop-on-full (QDE) for every Rx ring of pool @vf_idx; when the
+ * pool has a PF-assigned VLAN the hidden-VLAN bit is also set. Each ring
+ * index is written through the indexed QDE register with the WRITE strobe.
+ */
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					 u16 pf_vlan, u8 ring_per_pool)
+{
+	u32 qde = SXE_QDE_ENABLE;
+	u8 i;
+
+	if (pf_vlan)
+		qde |= SXE_QDE_HIDE_VLAN;
+
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
+		u32 value;
+
+		SXE_WRITE_FLUSH(hw);
+
+		value = i << SXE_QDE_IDX_SHIFT;
+		value |= qde | SXE_QDE_WRITE;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+}
+
+/* Return the Rx pool-enable bitmap word @reg_idx (VFRE). */
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFRE(reg_idx));
+}
+
+/* Write the Rx pool-enable bitmap word @reg_idx (VFRE). */
+void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
+					u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFRE(reg_idx), bitmap);
+
+}
+
+/* Return the Tx pool-enable bitmap word @reg_idx (VFTE). */
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_VFTE(reg_idx));
+}
+
+/* Write the Tx pool-enable bitmap word @reg_idx (VFTE). */
+void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
+					u8 reg_idx, u32 bitmap)
+{
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), bitmap);
+
+}
+
+/* Write the DCB rate-scheduler maximum memory window (RTTBCNRM). */
+void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RTTBCNRM, value);
+
+}
+
+/* Select Tx ring @ring_idx via RTTDQSEL, then program its rate factor. */
+void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
+					  u32 ring_idx, u32 rate)
+{
+	SXE_REG_WRITE(hw, SXE_RTTDQSEL, ring_idx);
+	SXE_REG_WRITE(hw, SXE_RTTBCNRC, rate);
+
+}
+
+/* Set bit @bit_index in VMECM word @reg_idx to enable spoof-event
+ * counting for the corresponding pool.
+ */
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+					u8 reg_idx, u8 bit_index)
+{
+	u32 value = SXE_REG_READ(hw, SXE_VMECM(reg_idx));
+
+	value |= BIT(bit_index);
+
+	SXE_REG_WRITE(hw, SXE_VMECM(reg_idx), value);
+
+}
+
+/* Set or clear the MAC anti-spoof bit for VF @vf_idx; the SPOOF array
+ * packs 8 VFs per 32-bit word.
+ */
+void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
+					u8 vf_idx, bool status)
+{
+	u8 reg_index = vf_idx >> 3;
+	u32 mask = BIT(vf_idx % 8);
+	u32 value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
+
+	value = status ? (value | mask) : (value & ~mask);
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
+}
+
+/* Read RTRUP2TC and unpack the per-user-priority TC values into @map
+ * (MAX_TRAFFIC_CLASS entries, each masked to the UP field width).
+ */
+static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map)
+{
+	u32 reg, i;
+
+	reg = SXE_REG_READ(hw, SXE_RTRUP2TC);
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		map[i] = SXE_RTRUP2TC_UP_MASK &
+			(reg >> (i * SXE_RTRUP2TC_UP_SHIFT));
+	}
+
+}
+
+/* Toggle drop-on-no-descriptor (SRRCTL.DROP_EN) for Rx queue @idx. */
+void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable)
+{
+	u32 ctrl = SXE_REG_READ(hw, SXE_SRRCTL(idx));
+
+	if (is_enable)
+		ctrl |= SXE_SRRCTL_DROP_EN;
+	else
+		ctrl &= ~SXE_SRRCTL_DROP_EN;
+
+	SXE_REG_WRITE(hw, SXE_SRRCTL(idx), ctrl);
+}
+
+/* Set or clear the VLAN anti-spoof bit for VF @vf_idx; VLAN bits live
+ * SXE_SPOOF_VLAN_SHIFT above the MAC bits in the same SPOOF word.
+ */
+static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw,
+					u8 vf_idx, bool status)
+{
+	u8 reg_index = vf_idx >> 3;
+	u32 mask = BIT((vf_idx % 8) + SXE_SPOOF_VLAN_SHIFT);
+	u32 value = SXE_REG_READ(hw, SXE_SPOOF(reg_index));
+
+	value = status ? (value | mask) : (value & ~mask);
+
+	SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value);
+}
+
+/* Zero the Tx descriptor write-back addresses (PVFTDWBAL/H) for every
+ * ring belonging to pool @vf_idx.
+ */
+static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw,
+					u8 vf_idx, u8 ring_per_pool)
+{
+	u8 i;
+
+	for (i = 0; i < ring_per_pool; i++) {
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, i), 0);
+		SXE_REG_WRITE(hw, SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, i), 0);
+	}
+
+}
+
+/* Disable every Tx ring of pool @vf_idx. For rings with a non-zero
+ * TXDCTL the enable bit is deliberately set and then cleared in two
+ * writes — presumably to kick the queue state machine before the
+ * disable takes effect (confirm against the datasheet).
+ */
+static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw,
+					u8 ring_per_pool, u8 vf_idx)
+{
+	u32 ring_idx;
+	u32 reg;
+
+	for (ring_idx = 0; ring_idx < ring_per_pool; ring_idx++) {
+		u32 reg_idx = vf_idx * ring_per_pool + ring_idx;
+		reg = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx));
+		if (reg) {
+			reg |= SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+			reg &= ~SXE_TXDCTL_ENABLE;
+			SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg);
+		}
+	}
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Clear the per-ring Tx rate limiter (RTTBCNRC) for all rings up to
+ * @ring_max, selecting each ring through RTTDQSEL.
+ */
+void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max)
+{
+	u32 i;
+
+	for (i = 0; i < ring_max; i++) {
+		SXE_REG_WRITE(hw, SXE_RTTDQSEL, i);
+		SXE_REG_WRITE(hw, SXE_RTTBCNRC, 0);
+	}
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Point Tx TPH (TLP processing hints) for ring @ring_idx at @cpu and
+ * enable descriptor/data relaxed ordering plus descriptor TPH.
+ */
+static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_TXCTRL_CPUID_SHIFT;
+
+	value |= (SXE_TPH_TXCTRL_DESC_RRO_EN |
+		SXE_TPH_TXCTRL_DATA_RRO_EN |
+		SXE_TPH_TXCTRL_DESC_TPH_EN);
+
+	SXE_REG_WRITE(hw, SXE_TPH_TXCTRL(ring_idx), value);
+}
+
+/* Point Rx TPH for ring @ring_idx at @cpu and enable descriptor relaxed
+ * ordering plus data/descriptor TPH.
+ */
+static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu)
+{
+	u32 value = cpu;
+
+	value <<= SXE_TPH_RXCTRL_CPUID_SHIFT;
+
+	value |= (SXE_TPH_RXCTRL_DESC_RRO_EN |
+		SXE_TPH_RXCTRL_DATA_TPH_EN |
+		SXE_TPH_RXCTRL_DESC_TPH_EN);
+
+	SXE_REG_WRITE(hw, SXE_TPH_RXCTRL(ring_idx), value);
+}
+
+/* Globally switch TPH on (CB2 mode) or off. */
+static void sxe_hw_tph_switch(struct sxe_hw *hw, bool is_enable)
+{
+	u32 mode = is_enable ? SXE_TPH_CTRL_MODE_CB2 : SXE_TPH_CTRL_DISABLE;
+
+	SXE_REG_WRITE(hw, SXE_TPH_CTRL, mode);
+}
+
+/* DMA hardware-ops table: Rx/Tx ring management, VLAN, DCB bandwidth and
+ * PFC, VT pool and anti-spoof helpers wired into the HAL.
+ */
+static const struct sxe_dma_operations sxe_dma_ops = {
+	.rx_dma_ctrl_init = sxe_hw_rx_dma_ctrl_init,
+	.rx_ring_switch = sxe_hw_rx_ring_switch,
+	.rx_ring_switch_not_polling = sxe_hw_rx_ring_switch_not_polling,
+	.rx_ring_desc_configure = sxe_hw_rx_ring_desc_configure,
+	.rx_desc_thresh_set = sxe_hw_rx_desc_thresh_set,
+	.rx_rcv_ctl_configure = sxe_hw_rx_rcv_ctl_configure,
+	.rx_lro_ctl_configure = sxe_hw_rx_lro_ctl_configure,
+	.rx_desc_ctrl_get = sxe_hw_rx_desc_ctrl_get,
+	.rx_dma_lro_ctl_set = sxe_hw_rx_dma_lro_ctrl_set,
+	.rx_drop_switch = sxe_hw_rx_drop_switch,
+	.pool_rx_ring_drop_enable = sxe_hw_pool_rx_ring_drop_enable,
+	.rx_tph_update = sxe_hw_rx_tph_update,
+
+	.tx_enable = sxe_hw_tx_enable,
+	.tx_multi_ring_configure = sxe_hw_tx_multi_ring_configure,
+	.tx_ring_desc_configure = sxe_hw_tx_ring_desc_configure,
+	.tx_desc_thresh_set = sxe_hw_tx_desc_thresh_set,
+	.tx_desc_wb_thresh_clear = sxe_hw_tx_desc_wb_thresh_clear,
+	.tx_ring_switch = sxe_hw_tx_ring_switch,
+	.tx_ring_switch_not_polling = sxe_hw_tx_ring_switch_not_polling,
+	.tx_pkt_buf_thresh_configure = sxe_hw_tx_pkt_buf_thresh_configure,
+	.tx_desc_ctrl_get = sxe_hw_tx_desc_ctrl_get,
+	.tx_ring_info_get = sxe_hw_tx_ring_info_get,
+	.tx_tph_update = sxe_hw_tx_tph_update,
+
+	.tph_switch = sxe_hw_tph_switch,
+
+	.vlan_tag_strip_switch = sxe_hw_vlan_tag_strip_switch,
+	.tx_vlan_tag_set = sxe_hw_tx_vlan_tag_set,
+	.tx_vlan_tag_clear = sxe_hw_tx_vlan_tag_clear,
+
+	.dcb_rx_bw_alloc_configure = sxe_hw_dcb_rx_bw_alloc_configure,
+	.dcb_tx_desc_bw_alloc_configure = sxe_hw_dcb_tx_desc_bw_alloc_configure,
+	.dcb_tx_data_bw_alloc_configure = sxe_hw_dcb_tx_data_bw_alloc_configure,
+	.dcb_pfc_configure = sxe_hw_dcb_pfc_configure,
+	.dcb_tc_stats_configure = sxe_hw_dcb_8tc_vmdq_off_stats_configure,
+	.dcb_rx_up_tc_map_set = sxe_hw_dcb_rx_up_tc_map_set,
+	.dcb_rx_up_tc_map_get = sxe_hw_dcb_rx_up_tc_map_get,
+	.dcb_rate_limiter_clear = sxe_hw_dcb_rate_limiter_clear,
+	.dcb_tx_ring_rate_factor_set = sxe_hw_dcb_tx_ring_rate_factor_set,
+
+	.vt_pool_loopback_switch = sxe_hw_vt_pool_loopback_switch,
+	.rx_pool_get = sxe_hw_rx_pool_bitmap_get,
+	.rx_pool_set = sxe_hw_rx_pool_bitmap_set,
+	.tx_pool_get = sxe_hw_tx_pool_bitmap_get,
+	.tx_pool_set = sxe_hw_tx_pool_bitmap_set,
+
+	.vf_tx_desc_addr_clear = sxe_hw_vf_tx_desc_addr_clear,
+	.pool_mac_anti_spoof_set = sxe_hw_pool_mac_anti_spoof_set,
+	.pool_vlan_anti_spoof_set = sxe_hw_pool_vlan_anti_spoof_set,
+
+	.max_dcb_memory_window_set = sxe_hw_dcb_max_mem_window_set,
+	.spoof_count_enable = sxe_hw_spoof_count_enable,
+
+	.vf_tx_ring_disable = sxe_hw_vf_tx_ring_disable,
+	.all_ring_disable = sxe_hw_all_ring_disable,
+	.tx_ring_tail_init = sxe_hw_tx_ring_tail_init,
+};
+
+
+#ifdef SXE_IPSEC_CONFIGURE
+
+/* Commit a pending Rx IPsec SA entry: keep only the global enable bit
+ * of IPSRXIDX, select table @type and slot @idx, and strobe WRITE.
+ */
+static void sxe_hw_ipsec_rx_sa_load(struct sxe_hw *hw, u16 idx,
+					u8 type)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSRXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= type << SXE_RXIDX_TBL_SHIFT |
+	       idx << SXE_RXTXIDX_IDX_SHIFT |
+	       SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Stage an IP address (@ip_len 32-bit words, little-endian on the wire)
+ * into the Rx IPsec address registers, then commit it to IP-table slot
+ * @ip_idx.
+ */
+static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw,
+					 __be32 *ip_addr, u8 ip_len, u8 ip_idx)
+{
+	u8 i;
+
+	for (i = 0; i < ip_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXIPADDR(i),
+				(__force u32)cpu_to_le32((__force u32)ip_addr[i]));
+	}
+	SXE_WRITE_FLUSH(hw);
+	sxe_hw_ipsec_rx_sa_load(hw, ip_idx, SXE_IPSEC_IP_TABLE);
+
+}
+
+/* Stage an SPI and its IP-table index, then commit them to SPI-table
+ * slot @sa_idx.
+ */
+static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw,
+					  __be32 spi, u8 ip_idx, u16 sa_idx)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi));
+
+	SXE_REG_WRITE(hw, SXE_IPSRXIPIDX, ip_idx);
+
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_SPI_TABLE);
+
+}
+
+/* Stage an Rx SA key (written word-reversed, big-endian), salt and mode,
+ * then commit them to key-table slot @sa_idx.
+ */
+static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw,
+			u32 *key, u8 key_len, u32 salt, u32 mode, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSRXKEY(i),
+			(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+
+	SXE_REG_WRITE(hw, SXE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_REG_WRITE(hw, SXE_IPSRXMOD, mode);
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_KEY_TABLE);
+
+}
+
+/* Commit a pending Tx IPsec SA entry to slot @idx, preserving only the
+ * global enable bit of IPSTXIDX.
+ */
+static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx)
+{
+	u32 reg = SXE_REG_READ(hw, SXE_IPSTXIDX);
+
+	reg &= SXE_RXTXIDX_IPS_EN;
+	reg |= idx << SXE_RXTXIDX_IDX_SHIFT | SXE_RXTXIDX_WRITE;
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, reg);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Stage a Tx SA key (word-reversed, big-endian) and salt, then commit
+ * them to Tx SA slot @sa_idx.
+ */
+static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key,
+					  u8 key_len, u32 salt, u16 sa_idx)
+{
+	u8 i;
+
+	for (i = 0; i < key_len; i++) {
+		SXE_REG_WRITE(hw, SXE_IPSTXKEY(i),
+			(__force u32)cpu_to_be32(key[(key_len - 1) - i]));
+	}
+	SXE_REG_WRITE(hw, SXE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
+	SXE_WRITE_FLUSH(hw);
+
+	sxe_hw_ipsec_tx_sa_load(hw, sa_idx);
+
+}
+
+/* Quiesce the security Tx/Rx data paths: set the TX_DIS/RX_DIS bits and
+ * wait for both SECTX/SECRX ready flags. If the link is down, MAC
+ * loopback is temporarily enabled so queued frames can drain; the wait
+ * polls up to 21 times at 10 ms each.
+ */
+static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 tx_empty, rx_empty;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & SXE_SECTXSTAT_SECTX_RDY;
+	rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & SXE_SECRXSTAT_SECRX_RDY;
+	if (tx_empty && rx_empty)
+		return;
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, SXE_LPBKCTRL_EN);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	limit = 20;
+	do {
+		mdelay(10);
+		tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			SXE_SECTXSTAT_SECTX_RDY;
+		rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+			SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(tx_empty && rx_empty) && limit--);
+
+	if (!is_linkup) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0);
+
+		SXE_WRITE_FLUSH(hw);
+	}
+
+}
+
+/* Start the IPsec engine: quiesce the sec path, program the Tx minimum
+ * inter-frame gap (low nibble = 0x3) and buffer almost-full threshold
+ * (low 10 bits = 0x15), enable store-and-forward, and set the global
+ * IPsec enable bits in both index registers.
+ */
+static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 reg;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXBUFFAF);
+	reg = (reg & 0xfffffc00) | 0x15;
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, 0);
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_STORE_FORWARD);
+
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, SXE_RXTXIDX_IPS_EN);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, SXE_RXTXIDX_IPS_EN);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/*
+ * Disable the ipsec offload engine and restore the security block's
+ * non-offload defaults (thresholds, store-and-forward off).
+ */
+static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup)
+{
+	u32 reg;
+
+	sxe_hw_ipsec_sec_data_stop(hw, is_linkup);
+
+	/* Clear the ipsec enable bits first. */
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	/* Restore default tx buffer almost-full threshold. */
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	/* Restore minimum inter-frame gap: low 4 bits set to 0x1. */
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Return true when ipsec offload is disabled in either direction. */
+bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw)
+{
+	u32 tx_stat = SXE_REG_READ(hw, SXE_SECTXSTAT);
+	u32 rx_stat = SXE_REG_READ(hw, SXE_SECRXSTAT);
+
+	return (tx_stat & SXE_SECTXSTAT_SECTX_OFF_DIS) != 0 ||
+	       (rx_stat & SXE_SECRXSTAT_SECRX_OFF_DIS) != 0;
+}
+
+/* Clear the rx/tx SA index registers, disabling ipsec SA processing. */
+void sxe_hw_ipsec_sa_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0);
+	SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0);
+
+}
+
+/* Security (ipsec) hardware ops table, built under the surrounding #if. */
+static const struct sxe_sec_operations sxe_sec_ops = {
+	.ipsec_rx_ip_store = sxe_hw_ipsec_rx_ip_store,
+	.ipsec_rx_spi_store = sxe_hw_ipsec_rx_spi_store,
+	.ipsec_rx_key_store = sxe_hw_ipsec_rx_key_store,
+	.ipsec_tx_key_store = sxe_hw_ipsec_tx_key_store,
+	.ipsec_sec_data_stop = sxe_hw_ipsec_sec_data_stop,
+	.ipsec_engine_start = sxe_hw_ipsec_engine_start,
+	.ipsec_engine_stop = sxe_hw_ipsec_engine_stop,
+	.ipsec_sa_disable = sxe_hw_ipsec_sa_disable,
+	.ipsec_offload_is_disable = sxe_hw_ipsec_offload_is_disable,
+};
+#endif
+
+/*
+ * NOTE(review): this unconditional stub shares its name with the guarded
+ * sxe_sec_ops table above; if the surrounding conditional ever compiles
+ * both, the result is a redefinition error - confirm the #if/#else
+ * structure keeps exactly one definition active per build.
+ */
+static const struct sxe_sec_operations sxe_sec_ops = { 0 };
+
+
+/*
+ * Reset the hardware statistics by reading each counter register once and
+ * discarding the value (the counters appear to be read-to-clear - the
+ * accumulating getters below rely on the same behavior; confirm against
+ * the datasheet).
+ */
+void sxe_hw_stats_regs_clean(struct sxe_hw *hw)
+{
+	u16 i;
+	/* Per-ring packet/byte/drop counters (16 queue pairs). */
+	for (i = 0; i < 16; i++) {
+		SXE_REG_READ(hw, SXE_QPTC(i));
+		SXE_REG_READ(hw, SXE_QPRC(i));
+		SXE_REG_READ(hw, SXE_QBTC_H(i));
+		SXE_REG_READ(hw, SXE_QBTC_L(i));
+		SXE_REG_READ(hw, SXE_QBRC_H(i));
+		SXE_REG_READ(hw, SXE_QBRC_L(i));
+		SXE_REG_READ(hw, SXE_QPRDC(i));
+	}
+
+	/* DBU/loopback datapath counters. */
+	SXE_REG_READ(hw, SXE_RXDGBCH);
+	SXE_REG_READ(hw, SXE_RXDGBCL);
+	SXE_REG_READ(hw, SXE_RXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGPC);
+	SXE_REG_READ(hw, SXE_TXDGBCH);
+	SXE_REG_READ(hw, SXE_TXDGBCL);
+	SXE_REG_READ(hw, SXE_RXDDGPC);
+	SXE_REG_READ(hw, SXE_RXDDGBCH);
+	SXE_REG_READ(hw, SXE_RXDDGBCL);
+	SXE_REG_READ(hw, SXE_RXLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCH);
+	SXE_REG_READ(hw, SXE_RXLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXDLPBKGPC);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCH);
+	SXE_REG_READ(hw, SXE_RXDLPBKGBCL);
+	SXE_REG_READ(hw, SXE_RXTPCIN);
+	SXE_REG_READ(hw, SXE_RXTPCOUT);
+	SXE_REG_READ(hw, SXE_RXPRDDC);
+	SXE_REG_READ(hw, SXE_TXSWERR);
+	SXE_REG_READ(hw, SXE_TXSWITCH);
+	SXE_REG_READ(hw, SXE_TXREPEAT);
+	SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	/* MAC rx counters (errors, size histogram, good/broadcast/multicast). */
+	SXE_REG_READ(hw, SXE_CRCERRS);
+	SXE_REG_READ(hw, SXE_ERRBC);
+	SXE_REG_READ(hw, SXE_RLEC);
+	SXE_REG_READ(hw, SXE_PRC64);
+	SXE_REG_READ(hw, SXE_PRC127);
+	SXE_REG_READ(hw, SXE_PRC255);
+	SXE_REG_READ(hw, SXE_PRC511);
+	SXE_REG_READ(hw, SXE_PRC1023);
+	SXE_REG_READ(hw, SXE_PRC1522);
+	SXE_REG_READ(hw, SXE_GPRC);
+	SXE_REG_READ(hw, SXE_BPRC);
+	SXE_REG_READ(hw, SXE_MPRC);
+	SXE_REG_READ(hw, SXE_GPTC);
+	SXE_REG_READ(hw, SXE_GORCL);
+	SXE_REG_READ(hw, SXE_GORCH);
+	SXE_REG_READ(hw, SXE_GOTCL);
+	SXE_REG_READ(hw, SXE_GOTCH);
+	SXE_REG_READ(hw, SXE_RUC);
+	SXE_REG_READ(hw, SXE_RFC);
+	SXE_REG_READ(hw, SXE_ROC);
+	SXE_REG_READ(hw, SXE_RJC);
+	/* Per-priority rx pause counters. */
+	for (i = 0; i < 8; i++)
+		SXE_REG_READ(hw, SXE_PRCPF(i));
+
+	/* MAC tx counters. */
+	SXE_REG_READ(hw, SXE_TORL);
+	SXE_REG_READ(hw, SXE_TORH);
+	SXE_REG_READ(hw, SXE_TPR);
+	SXE_REG_READ(hw, SXE_TPT);
+	SXE_REG_READ(hw, SXE_PTC64);
+	SXE_REG_READ(hw, SXE_PTC127);
+	SXE_REG_READ(hw, SXE_PTC255);
+	SXE_REG_READ(hw, SXE_PTC511);
+	SXE_REG_READ(hw, SXE_PTC1023);
+	SXE_REG_READ(hw, SXE_PTC1522);
+	SXE_REG_READ(hw, SXE_MPTC);
+	SXE_REG_READ(hw, SXE_BPTC);
+	/* Per-priority tx pause counters. */
+	for (i = 0; i < 8; i++)
+		SXE_REG_READ(hw, SXE_PFCT(i));
+
+}
+
+/*
+ * Accumulate the counters shared between the get and clean sequences:
+ * per-priority pause frames, good tx packets and good tx octets.
+ * Under DPDK, GOTCH is re-read (up to 10 times) until it reads back zero
+ * - presumably read-to-clear; a non-zero final read is only logged.
+ */
+static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+#ifdef SXE_DPDK
+	u64 gotch = 0;
+	u32 rycle_cnt = 10;
+#endif
+
+	/* Pause counters: also folded into the total_tx_pause aggregate. */
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+		((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+#ifdef SXE_DPDK
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		rycle_cnt--;
+	} while (gotch != 0 && rycle_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+#endif
+
+}
+
+/*
+ * Final accumulation pass used when clearing stats: fold the remaining
+ * tx octet/packet and pause counters into @stats so no counts are lost.
+ * Same GOTCH re-read pattern as sxe_hw_stats_seq_get.
+ */
+void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u8 i;
+	u64 tx_pfc_num = 0;
+	u64 gotch = 0;
+	u32 rycle_cnt = 10;
+
+	stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) |
+		((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32));
+	stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC);
+	/* Re-read GOTCH until it reads back zero (max 10 attempts). */
+	do {
+		gotch = SXE_REG_READ(hw, SXE_GOTCH);
+		rycle_cnt--;
+	} while (gotch != 0 && rycle_cnt != 0);
+	if (gotch != 0) {
+		LOG_INFO("GOTCH is not clear!\n");
+	}
+	for (i = 0; i < 8; i++) {
+		stats->prcpf[i] += SXE_REG_READ(hw, SXE_PRCPF(i));
+		tx_pfc_num = SXE_REG_READ(hw, SXE_PFCT(i));
+		stats->pfct[i] += tx_pfc_num;
+		stats->total_tx_pause += tx_pfc_num;
+	}
+
+}
+
+/*
+ * Accumulate every hardware statistics counter into @stats.
+ *
+ * Each register read is added to the matching software counter.  Under
+ * DPDK the high words of GORC/TOR are re-read until they read back zero
+ * (presumably read-to-clear), and the final gptc/gotc values are adjusted
+ * to exclude pause frames and CRC bytes.
+ */
+void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats)
+{
+	u64 rjc;
+	u32 i, rx_dbu_drop, ring_drop = 0;
+	u64 tpr = 0;
+#ifdef SXE_DPDK
+	u32 rycle_cnt = 10;
+	u64 gorch, torh = 0;
+#endif
+
+	/* Per-ring packet/byte/drop counters (16 queue pairs). */
+	for (i = 0; i < 16; i++) {
+		stats->qptc[i] += SXE_REG_READ(hw, SXE_QPTC(i));
+		stats->qprc[i] += SXE_REG_READ(hw, SXE_QPRC(i));
+		ring_drop = SXE_REG_READ(hw, SXE_QPRDC(i));
+		stats->qprdc[i] += ring_drop;
+		stats->hw_rx_no_dma_resources += ring_drop;
+
+		/* High word first, then low word, with a read barrier between. */
+		stats->qbtc[i] += ((u64)SXE_REG_READ(hw, SXE_QBTC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbtc[i] += SXE_REG_READ(hw, SXE_QBTC_L(i));
+
+		stats->qbrc[i] += ((u64)SXE_REG_READ(hw, SXE_QBRC_H(i)) << 32);
+		SXE_RMB();
+		stats->qbrc[i] += SXE_REG_READ(hw, SXE_QBRC_L(i));
+	}
+	/* DBU/loopback datapath counters. */
+	stats->rxdgbc += ((u64)SXE_REG_READ(hw, SXE_RXDGBCH) << 32) +
+			(SXE_REG_READ(hw, SXE_RXDGBCL));
+
+	stats->rxdgpc += SXE_REG_READ(hw, SXE_RXDGPC);
+	stats->txdgpc += SXE_REG_READ(hw, SXE_TXDGPC);
+	stats->txdgbc += (((u64)SXE_REG_READ(hw, SXE_TXDGBCH) << 32) +
+			SXE_REG_READ(hw, SXE_TXDGBCL));
+
+	stats->rxddpc += SXE_REG_READ(hw, SXE_RXDDGPC);
+	stats->rxddbc += ((u64)SXE_REG_READ(hw, SXE_RXDDGBCH) << 32) +
+			(SXE_REG_READ(hw, SXE_RXDDGBCL));
+
+	stats->rxlpbkpc += SXE_REG_READ(hw, SXE_RXLPBKGPC);
+	stats->rxlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXLPBKGBCH) << 32) +
+		(SXE_REG_READ(hw, SXE_RXLPBKGBCL));
+
+	stats->rxdlpbkpc += SXE_REG_READ(hw, SXE_RXDLPBKGPC);
+	stats->rxdlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXDLPBKGBCH) << 32) +
+		(SXE_REG_READ(hw, SXE_RXDLPBKGBCL));
+	stats->rxtpcing += SXE_REG_READ(hw, SXE_RXTPCIN);
+	stats->rxtpceng += SXE_REG_READ(hw, SXE_RXTPCOUT);
+	stats->prddc += SXE_REG_READ(hw, SXE_RXPRDDC);
+	stats->txswerr += SXE_REG_READ(hw, SXE_TXSWERR);
+	stats->txswitch += SXE_REG_READ(hw, SXE_TXSWITCH);
+	stats->txrepeat += SXE_REG_READ(hw, SXE_TXREPEAT);
+	stats->txdescerr += SXE_REG_READ(hw, SXE_TXDESCERR);
+
+	/* Per-TC DBU in/out/drop counters. */
+	for (i = 0; i < 8; i++) {
+		stats->dburxtcin[i] += SXE_REG_READ(hw, SXE_DBUDRTCICNT(i));
+		stats->dburxtcout[i] += SXE_REG_READ(hw, SXE_DBUDRTCOCNT(i));
+		stats->dburxgdreecnt[i] += SXE_REG_READ(hw, SXE_DBUDREECNT(i));
+		rx_dbu_drop = SXE_REG_READ(hw, SXE_DBUDROFPCNT(i));
+		stats->dburxdrofpcnt[i] += rx_dbu_drop;
+		stats->dbutxtcin[i] += SXE_REG_READ(hw, SXE_DBUDTTCICNT(i));
+		stats->dbutxtcout[i] += SXE_REG_READ(hw, SXE_DBUDTTCOCNT(i));
+	}
+
+	/* Flow director: add/remove counts in low/high halves of the regs. */
+	stats->fnavadd += (SXE_REG_READ(hw, SXE_FNAVUSTAT) & 0xFFFF);
+	stats->fnavrmv += ((SXE_REG_READ(hw, SXE_FNAVUSTAT) >> 16) & 0xFFFF);
+	stats->fnavadderr += (SXE_REG_READ(hw, SXE_FNAVFSTAT) & 0xFFFF);
+	stats->fnavrmverr += ((SXE_REG_READ(hw, SXE_FNAVFSTAT) >> 16) & 0xFFFF);
+	stats->fnavmatch += SXE_REG_READ(hw, SXE_FNAVMATCH);
+	stats->fnavmiss += SXE_REG_READ(hw, SXE_FNAVMISS);
+
+	sxe_hw_stats_seq_get(hw, stats);
+
+	/* MAC rx counters. */
+	stats->crcerrs += SXE_REG_READ(hw, SXE_CRCERRS);
+	stats->errbc += SXE_REG_READ(hw, SXE_ERRBC);
+	stats->bprc += SXE_REG_READ(hw, SXE_BPRC);
+	stats->mprc += SXE_REG_READ(hw, SXE_MPRC);
+	stats->roc += SXE_REG_READ(hw, SXE_ROC);
+	stats->prc64 += SXE_REG_READ(hw, SXE_PRC64);
+	stats->prc127 += SXE_REG_READ(hw, SXE_PRC127);
+	stats->prc255 += SXE_REG_READ(hw, SXE_PRC255);
+	stats->prc511 += SXE_REG_READ(hw, SXE_PRC511);
+	stats->prc1023 += SXE_REG_READ(hw, SXE_PRC1023);
+	stats->prc1522 += SXE_REG_READ(hw, SXE_PRC1522);
+	stats->rlec += SXE_REG_READ(hw, SXE_RLEC);
+	stats->mptc += SXE_REG_READ(hw, SXE_MPTC);
+	stats->ruc += SXE_REG_READ(hw, SXE_RUC);
+	stats->rfc += SXE_REG_READ(hw, SXE_RFC);
+
+	/* Jabbers are counted as oversize frames too. */
+	rjc = SXE_REG_READ(hw, SXE_RJC);
+	stats->rjc += rjc;
+	stats->roc += rjc;
+
+	tpr = SXE_REG_READ(hw, SXE_TPR);
+	stats->tpr += tpr;
+	stats->tpt += SXE_REG_READ(hw, SXE_TPT);
+	stats->ptc64 += SXE_REG_READ(hw, SXE_PTC64);
+	stats->ptc127 += SXE_REG_READ(hw, SXE_PTC127);
+	stats->ptc255 += SXE_REG_READ(hw, SXE_PTC255);
+	stats->ptc511 += SXE_REG_READ(hw, SXE_PTC511);
+	stats->ptc1023 += SXE_REG_READ(hw, SXE_PTC1023);
+	stats->ptc1522 += SXE_REG_READ(hw, SXE_PTC1522);
+	stats->bptc += SXE_REG_READ(hw, SXE_BPTC);
+
+	stats->gprc += SXE_REG_READ(hw, SXE_GPRC);
+	stats->gorc += (SXE_REG_READ(hw, SXE_GORCL) |
+		((u64)SXE_REG_READ(hw, SXE_GORCH) << 32));
+#ifdef SXE_DPDK
+	/* Re-read GORCH until it reads back zero (max 10 attempts). */
+	do {
+		gorch = SXE_REG_READ(hw, SXE_GORCH);
+		rycle_cnt--;
+	} while (gorch != 0 && rycle_cnt != 0);
+	if (gorch != 0) {
+		LOG_INFO("GORCH is not clear!\n");
+	}
+#endif
+
+	stats->tor += (SXE_REG_READ(hw, SXE_TORL) |
+		((u64)SXE_REG_READ(hw, SXE_TORH) << 32));
+#ifdef SXE_DPDK
+	rycle_cnt = 10;
+	do {
+		torh = SXE_REG_READ(hw, SXE_TORH);
+		rycle_cnt--;
+	} while (torh != 0 && rycle_cnt != 0);
+	if (torh != 0) {
+		LOG_INFO("TORH is not clear!\n");
+	}
+#endif
+
+#ifdef SXE_DPDK
+	/* DPDK reports CRC-less octets and excludes pause frames. */
+	stats->tor -= tpr * RTE_ETHER_CRC_LEN;
+	stats->gptc = stats->total_gptc - stats->total_tx_pause;
+	stats->gotc = stats->total_gotc - stats->total_tx_pause * RTE_ETHER_MIN_LEN
+		- stats->gptc * RTE_ETHER_CRC_LEN;
+#else
+	stats->gptc = stats->total_gptc;
+	stats->gotc = stats->total_gotc;
+#endif
+
+}
+
+/* Return the current tx good-packet counter (SXE_TXDGPC). */
+static u32 sxe_hw_tx_packets_num_get(struct sxe_hw *hw)
+{
+	u32 pkt_cnt = SXE_REG_READ(hw, SXE_TXDGPC);
+
+	return pkt_cnt;
+}
+
+/* Return the security-violation packet counter (SXE_SSVPC). */
+static u32 sxe_hw_unsec_packets_num_get(struct sxe_hw *hw)
+{
+	u32 unsec_cnt = SXE_REG_READ(hw, SXE_SSVPC);
+
+	return unsec_cnt;
+}
+
+/*
+ * Dump MAC statistics registers into @regs_buff (buf_size bytes) and
+ * return the number of dwords written.
+ * NOTE(review): regs_num is derived only from buf_size - assumes the
+ * caller never passes a buffer larger than the mac_regs table; confirm
+ * against the mac_regs definition.
+ */
+static u32 sxe_hw_mac_stats_dump(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 i;
+	u32 regs_num = buf_size / sizeof(u32);
+
+	for (i = 0; i < regs_num; i++)
+		regs_buff[i] = SXE_REG_READ(hw, mac_regs[i]);
+
+	return i;
+}
+
+/* Return the DBU-to-MAC tx packet counter (SXE_DTMPCNT). */
+static u32 sxe_hw_tx_dbu_to_mac_stats(struct sxe_hw *hw)
+{
+	u32 dtm_cnt = SXE_REG_READ(hw, SXE_DTMPCNT);
+
+	return dtm_cnt;
+}
+
+/* Statistics hardware ops table wired into hw->stat.ops by sxe_hw_ops_init. */
+static const struct sxe_stat_operations sxe_stat_ops = {
+	.stats_get = sxe_hw_stats_get,
+	.stats_clear = sxe_hw_stats_regs_clean,
+	.mac_stats_dump = sxe_hw_mac_stats_dump,
+	.tx_packets_num_get = sxe_hw_tx_packets_num_get,
+	.unsecurity_packets_num_get = sxe_hw_unsec_packets_num_get,
+	.tx_dbu_to_mac_stats = sxe_hw_tx_dbu_to_mac_stats,
+};
+
+/* Initialize mailbox parameters (message length, retry policy) and zero
+ * all mailbox statistics counters.
+ */
+void sxe_hw_mbx_init(struct sxe_hw *hw)
+{
+	hw->mbx.msg_len = SXE_MBX_MSG_NUM;
+	hw->mbx.interval = SXE_MBX_RETRY_INTERVAL;
+	hw->mbx.retry = SXE_MBX_RETRY_COUNT;
+
+	hw->mbx.stats.rcv_msgs = 0;
+	hw->mbx.stats.send_msgs = 0;
+	hw->mbx.stats.acks = 0;
+	hw->mbx.stats.reqs = 0;
+	hw->mbx.stats.rsts = 0;
+
+}
+
+/* Test @mask in SXE_PFMBICR(@index); write-one-to-clear it when set. */
+static bool sxe_hw_vf_irq_check(struct sxe_hw *hw, u32 mask, u32 index)
+{
+	u32 icr = SXE_REG_READ(hw, SXE_PFMBICR(index));
+	bool pending = (icr & mask) != 0;
+
+	if (pending)
+		SXE_REG_WRITE(hw, SXE_PFMBICR(index), mask);
+
+	return pending;
+}
+
+/* Check and acknowledge a pending function-level reset for @vf_idx. */
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 reg_idx = vf_idx >> 5;
+	u32 vf_bit = BIT(vf_idx % 32);
+
+	if (!(SXE_REG_READ(hw, SXE_VFLRE(reg_idx)) & vf_bit))
+		return false;
+
+	/* Clear the latched VFLR bit and count the reset. */
+	SXE_REG_WRITE(hw, SXE_VFLREC(reg_idx), vf_bit);
+	hw->mbx.stats.rsts++;
+	return true;
+}
+
+/* Return true (and count it) when VF @vf_idx raised a mailbox request. */
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 reg_idx = vf_idx >> 4;
+	u8 vf_shift = vf_idx % 16;
+
+	if (!sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFREQ << vf_shift, reg_idx))
+		return false;
+
+	hw->mbx.stats.reqs++;
+	return true;
+}
+
+/* Return true (and count it) when VF @vf_idx acked a mailbox message. */
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx)
+{
+	u8 reg_idx = vf_idx >> 4;
+	u8 vf_shift = vf_idx % 16;
+
+	if (!sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFACK << vf_shift, reg_idx))
+		return false;
+
+	hw->mbx.stats.acks++;
+	return true;
+}
+
+/* Try to take the PF side of the @vf_idx mailbox; true on success. */
+static bool sxe_hw_mbx_lock(struct sxe_hw *hw, u8 vf_idx)
+{
+	u32 attempts = hw->mbx.retry;
+
+	while (attempts--) {
+		/* Request PFU, then read back to confirm ownership. */
+		SXE_REG_WRITE(hw, SXE_PFMAILBOX(vf_idx), SXE_PFMAILBOX_PFU);
+		if (SXE_REG_READ(hw, SXE_PFMAILBOX(vf_idx)) & SXE_PFMAILBOX_PFU)
+			return true;
+
+		udelay(hw->mbx.interval);
+	}
+
+	return false;
+}
+
+/*
+ * Read a mailbox message from VF @index into @msg.
+ *
+ * At most mbx->msg_len dwords are copied even if @msg_len is larger.
+ * Returns 0 on success, -SXE_ERR_MBX_LOCK_FAIL when the mailbox lock
+ * cannot be taken.
+ */
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u16 i;	/* was u8: would wrap (infinite loop) if msg_entry > 255 */
+	s32 ret = 0;
+	u16 msg_entry;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len;
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("vf idx:%d msg_len:%d rcv lock mailbox fail.(err:%d)\n",
+			index, msg_len, ret);
+		goto l_out;
+	}
+
+	for (i = 0; i < msg_entry; i++) {
+		msg[i] = SXE_REG_READ(hw, (SXE_PFMBMEM(index) + (i << 2)));
+		LOG_DEBUG_BDF("vf_idx:%u read mbx mem[%u]:0x%x.\n",
+			index, i, msg[i]);
+	}
+
+	/* Ack releases the lock and signals the VF. */
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_ACK);
+	mbx->stats.rcv_msgs++;
+
+l_out:
+	return ret;
+}
+
+/*
+ * Send a @msg_len dword message to VF @index through the PF mailbox.
+ *
+ * The first dword's sequence bits are merged from the previous mailbox
+ * contents (control messages keep the message id, others the PF bits).
+ * Returns 0 on success, -EINVAL when @msg_len exceeds the mailbox size,
+ * or -SXE_ERR_MBX_LOCK_FAIL when the lock cannot be taken.
+ */
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+				u16 msg_len, u16 index)
+{
+	struct sxe_mbx_info *mbx = &hw->mbx;
+	u16 i;	/* was u8: would wrap (infinite loop) if msg_len > 255 */
+	s32 ret = 0;
+	u32 old;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (msg_len > mbx->msg_len) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("pf reply msg num:%d exceed limit:%d reply fail.(err:%d)\n",
+			msg_len, mbx->msg_len, ret);
+		goto l_out;
+	}
+
+	if (!sxe_hw_mbx_lock(hw, index)) {
+		ret = -SXE_ERR_MBX_LOCK_FAIL;
+		LOG_ERROR_BDF("send msg len:%u to vf idx:%u msg[0]:0x%x "
+			"lock mailbox fail.(err:%d)\n",
+			msg_len, index, msg[0], ret);
+		goto l_out;
+	}
+
+	/* Carry the sequence/id bits over from the previous message. */
+	old = SXE_REG_READ(hw, (SXE_PFMBMEM(index)));
+	LOG_DEBUG_BDF("original send msg:0x%x. mbx mem[0]:0x%x\n", *msg, old);
+	if (msg[0] & SXE_CTRL_MSG_MASK)
+		msg[0] |= (old & SXE_MSGID_MASK);
+	else
+		msg[0] |= (old & SXE_PFMSG_MASK);
+
+	for (i = 0; i < msg_len; i++) {
+		SXE_REG_WRITE(hw, (SXE_PFMBMEM(index) + (i << 2)), msg[i]);
+		LOG_DEBUG_BDF("vf_idx:%u write mbx mem[%u]:0x%x.\n",
+			index, i, msg[i]);
+	}
+
+	/* STS releases the lock and notifies the VF. */
+	SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_STS);
+	mbx->stats.send_msgs++;
+
+l_out:
+	return ret;
+}
+
+/* Zero every dword of VF @vf_idx's mailbox memory window. */
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u8 entry;
+
+	for (entry = 0; entry < hw->mbx.msg_len; entry++)
+		SXE_REG_WRITE_ARRAY(hw, SXE_PFMBMEM(vf_idx), entry, 0);
+
+	SXE_WRITE_FLUSH(hw);
+	LOG_INFO_BDF("vf_idx:%u clear mbx mem.\n", vf_idx);
+}
+
+/* PF<->VF mailbox ops table wired into hw->mbx.ops by sxe_hw_ops_init. */
+static const struct sxe_mbx_operations sxe_mbx_ops = {
+	.init = sxe_hw_mbx_init,
+
+	.req_check = sxe_hw_vf_req_check,
+	.ack_check = sxe_hw_vf_ack_check,
+	.rst_check = sxe_hw_vf_rst_check,
+
+	.msg_send = sxe_hw_send_msg_to_vf,
+	.msg_rcv = sxe_hw_rcv_msg_from_vf,
+
+	.mbx_mem_clear = sxe_hw_mbx_mem_clear,
+};
+
+/* Program the PCIe virtualization mode via the GCR extended register. */
+void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_GCR_EXT, value);
+
+}
+
+/* PCIe ops table wired into hw->pcie.ops by sxe_hw_ops_init. */
+static const struct sxe_pcie_operations sxe_pcie_ops = {
+	.vt_mode_set = sxe_hw_pcie_vt_mode_set,
+};
+
+/*
+ * Acquire the host-driver-communication (HDC) channel lock.
+ *
+ * Drops any stale sw lock, polls up to @trylock times for the sw lock
+ * bit to clear, then verifies the pf lock was granted.
+ * Returns 0 on success, -SXE_ERR_HDC_LOCK_BUSY otherwise.
+ */
+s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock)
+{
+	u32 val;
+	u32 i;	/* was u16: could never reach a trylock > 65535 (infinite loop) */
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+	SXE_WRITE_FLUSH(hw);
+
+	for (i = 0; i < trylock; i++) {
+		val = SXE_REG_READ(hw, SXE_HDC_SW_LK) & SXE_HDC_SW_LK_BIT;
+		if (!val)
+			break;
+
+		udelay(10);
+	}
+
+	if (i >= trylock) {
+		LOG_ERROR_BDF("hdc is busy, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	val = SXE_REG_READ(hw, SXE_HDC_PF_LK) & SXE_HDC_PF_LK_BIT;
+	if (!val) {
+		/* Lock not granted: release our sw lock request. */
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		LOG_ERROR_BDF("get hdc lock fail, reg: 0x%x\n", val);
+		ret = -SXE_ERR_HDC_LOCK_BUSY;
+		goto l_out;
+	}
+
+	hw->hdc.pf_lock_val = val;
+	LOG_DEBUG_BDF("hw[%p]'s port[%u] got pf lock\n", hw, val);
+
+l_out:
+	return ret;
+}
+
+/*
+ * Release the HDC channel lock, retrying up to @retry_cnt + 1 times until
+ * the pf lock bit reads back clear.
+ */
+void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	do {
+		SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK);
+		udelay(1);
+		if (!(SXE_REG_READ(hw, SXE_HDC_PF_LK) & hw->hdc.pf_lock_val)) {
+			LOG_DEBUG_BDF("hw[%p]'s port[%u] release pf lock\n", hw,
+				hw->hdc.pf_lock_val);
+			hw->hdc.pf_lock_val = 0;
+			break;
+		}
+	} while ((retry_cnt--) > 0);
+
+}
+
+/* Clear the firmware-override (FW_OV) flag register. */
+void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_FW_OV, 0);
+}
+
+/* Return true when firmware has set the HDC FW_OV bit. */
+bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw)
+{
+	return (SXE_REG_READ(hw, SXE_HDC_FW_OV) & SXE_HDC_FW_OV_BIT) != 0;
+}
+
+/* Signal the firmware that the driver finished writing an HDC packet. */
+void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_SW_OV, SXE_HDC_SW_OV_BIT);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Write the HDC packet header dword. */
+void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, value);
+
+}
+
+/* Write one payload dword at @dword_index of the HDC data window. */
+void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
+					u16 dword_index, u32 value)
+{
+	SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index, value);
+}
+
+/* Read the firmware's HDC reply header dword. */
+u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw)
+{
+	u32 head = SXE_REG_READ(hw, SXE_HDC_PACKET_HEAD0);
+
+	return head;
+}
+
+/* Read one payload dword at @dword_index of the HDC data window. */
+u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
+					u16 dword_index)
+{
+	u32 dword = SXE_READ_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index);
+
+	return dword;
+}
+
+/* Read and log the firmware status register. */
+u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+	u32 status = SXE_REG_READ(hw, SXE_FW_STATUS_REG);
+
+	LOG_DEBUG_BDF("fw status[0x%x]\n", status);
+
+	return status;
+}
+
+/* Publish the driver status word to firmware. */
+void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DRV_STATUS_REG, value);
+}
+
+/* Read and log the firmware-maintained HDC channel state. */
+u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	u32 state = SXE_REG_READ(hw, SXE_FW_HDC_STATE_REG);
+
+	LOG_DEBUG_BDF("hdc channel state[0x%x]\n", state);
+
+	return state;
+}
+
+/* Read and log the HDC MSI event status register. */
+static u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw)
+{
+	u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("msi status[0x%x]\n", status);
+
+	return status;
+}
+
+/*
+ * Clear the @event bits in the HDC MSI status register via read-modify-
+ * write, leaving any other pending bits untouched.
+ */
+static void sxe_hw_hdc_irq_event_clear(struct sxe_hw *hw, u32 event)
+{
+	u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG);
+	struct sxe_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("msi status[0x%x] and clear bit=[0x%x]\n", status, event);
+
+	status &= ~event;
+	SXE_REG_WRITE(hw, SXE_HDC_MSI_STATUS_REG, status);
+
+}
+
+/* Reset the HDC channel: drop the sw lock, zero header and payload regs. */
+static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw)
+{
+	u16 dword;
+
+	SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 0x0);
+	SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, 0x0);
+	for (dword = 0; dword < SXE_HDC_DATA_LEN_MAX; dword++)
+		SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword, 0x0);
+}
+
+/* HDC channel ops table wired into hw->hdc.ops by sxe_hw_ops_init. */
+static const struct sxe_hdc_operations sxe_hdc_ops = {
+	.pf_lock_get = sxe_hw_hdc_lock_get,
+	.pf_lock_release = sxe_hw_hdc_lock_release,
+	.is_fw_over_set = sxe_hw_hdc_is_fw_over_set,
+	.fw_ack_header_rcv = sxe_hw_hdc_fw_ack_header_get,
+	.packet_send_done = sxe_hw_hdc_packet_send_done,
+	.packet_header_send = sxe_hw_hdc_packet_header_send,
+	.packet_data_dword_send = sxe_hw_hdc_packet_data_dword_send,
+	.packet_data_dword_rcv = sxe_hw_hdc_packet_data_dword_rcv,
+	.fw_status_get = sxe_hw_hdc_fw_status_get,
+	.drv_status_set = sxe_hw_hdc_drv_status_set,
+	.irq_event_get = sxe_hw_hdc_irq_event_get,
+	.irq_event_clear = sxe_hw_hdc_irq_event_clear,
+	.fw_ov_clear = sxe_hw_hdc_fw_ov_clear,
+	.channel_state_get = sxe_hw_hdc_channel_state_get,
+	.resource_clean = sxe_hw_hdc_resource_clean,
+};
+
+#ifdef SXE_PHY_CONFIGURE
+#define SXE_MDIO_COMMAND_TIMEOUT 100
+
+/*
+ * Write @phy_data to PHY register @reg_addr (address cycle followed by a
+ * write cycle, clause-45 style), polling SXE_MSCA for completion.
+ * Returns 0 on success or -SXE_ERR_MDIO_CMD_TIMEOUT.
+ */
+static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	s32 ret = 0;	/* was uninitialized: success path returned garbage (UB) */
+	u32 i, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	SXE_REG_WRITE(hw, SXE_MSCD, (u32)phy_data);
+
+	/* Address cycle. */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) |
+		(device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		(prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		(SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	/* Write cycle. */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) |
+		(device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		(prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		(SXE_MSCA_WRITE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy write cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+	}
+
+l_end:
+	return ret;
+}
+
+/*
+ * Read PHY register @reg_addr into @phy_data (address cycle followed by a
+ * read cycle, clause-45 style), polling SXE_MSCA for completion.
+ * Returns 0 on success or -SXE_ERR_MDIO_CMD_TIMEOUT.
+ */
+static s32 sxe_hw_phy_reg_read(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
+				u32 device_type, u16 *phy_data)
+{
+	s32 ret = 0;
+	u32 i, data, command;
+	struct sxe_adapter *adapter = hw->adapter;
+
+	/* Address cycle. */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) |
+		(device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		(prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		(SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		LOG_DEV_ERR("phy read cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	/* Read cycle. */
+	command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) |
+		(device_type << SXE_MSCA_DEV_TYPE_SHIFT) |
+		(prtad << SXE_MSCA_PHY_ADDR_SHIFT) |
+		(SXE_MSCA_READ | SXE_MSCA_MDI_CMD_ON_PROG));
+
+	SXE_REG_WRITE(hw, SXE_MSCA, command);
+
+	for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = SXE_REG_READ(hw, SXE_MSCA);
+		if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0)
+			break;
+	}
+
+	if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) {
+		/* was "phy write cmd": copy-paste from the write routine */
+		LOG_DEV_ERR("phy read cmd didn't complete, "
+			"reg_addr=%u, device_type=%u\n", reg_addr, device_type);
+		ret = -SXE_ERR_MDIO_CMD_TIMEOUT;
+		goto l_end;
+	}
+
+	data = SXE_REG_READ(hw, SXE_MSCD);
+	data >>= MDIO_MSCD_RDATA_SHIFT;
+	*phy_data = (u16)(data);
+
+l_end:
+	return ret;
+}
+
+#define SXE_PHY_REVISION_MASK 0x000F
+#define SXE_PHY_ID_HIGH_5_BIT_MASK 0xFC00
+#define SXE_PHY_ID_HIGH_SHIFT 10
+
+/*
+ * Assemble the 32-bit PHY identifier from the two MDIO device-id words.
+ * NOTE(review): MDIO_DEVID1 is stored in phy_id_low and MDIO_DEVID2 in
+ * phy_id_high, while the log messages describe the opposite split -
+ * confirm the intended id layout against the PHY datasheet.
+ */
+static s32 sxe_hw_phy_id_get(struct sxe_hw *hw, u32 prtad, u32 *id)
+{
+	s32 ret;
+	u16 phy_id_high = 0;
+	u16 phy_id_low = 0;
+
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID1, MDIO_MMD_PMAPMD,
+				&phy_id_low);
+
+	if (ret) {
+		LOG_ERROR("get phy id upper 16 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID2, MDIO_MMD_PMAPMD,
+				&phy_id_high);
+	if (ret) {
+		LOG_ERROR("get phy id lower 4 bits failed, prtad=%d\n", prtad);
+		goto l_end;
+	}
+
+	/* Top 6 bits of DEVID2 form the high half of the combined id. */
+	*id = (u32)((phy_id_high >> SXE_PHY_ID_HIGH_SHIFT) << 16);
+	*id |= (u32)phy_id_low;
+
+l_end:
+	return ret;
+}
+
+/*
+ * Query the PHY's speed abilities and translate them into the driver's
+ * SXE_LINK_SPEED_* mask in @speed.  Returns 0 on success; on failure
+ * @speed is left zeroed.
+ */
+s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed)
+{
+	s32 ret;
+	u16 speed_ability;
+
+	/* The bits below are OR-ed in, so start from a clean mask; the
+	 * caller's storage may be uninitialized.
+	 */
+	*speed = 0;
+
+	ret = hw->phy.ops->reg_read(hw, prtad, MDIO_SPEED, MDIO_MMD_PMAPMD,
+				&speed_ability);
+	if (ret) {
+		LOG_ERROR("get phy link cap failed, ret=%d, prtad=%d\n",
+			ret, prtad);
+		goto l_end;
+	}
+
+	if (speed_ability & MDIO_SPEED_10G)
+		*speed |= SXE_LINK_SPEED_10GB_FULL;
+
+	if (speed_ability & MDIO_PMA_SPEED_1000)
+		*speed |= SXE_LINK_SPEED_1GB_FULL;
+
+	if (speed_ability & MDIO_PMA_SPEED_100)
+		*speed |= SXE_LINK_SPEED_100_FULL;
+
+l_end:
+	return ret;
+}
+
+/*
+ * Issue a PHY software reset through MDIO_CTRL1 and poll (up to ~3s)
+ * for the self-clearing reset bit.  Returns 0 on success, a negative
+ * error on MDIO failure or -SXE_ERR_PHY_RESET_FAIL on poll timeout.
+ */
+static s32 sxe_hw_phy_ctrl_reset(struct sxe_hw *hw, u32 prtad)
+{
+	u32 i;
+	s32 ret;
+	u16 ctrl;
+
+	ret = sxe_hw_phy_reg_write(hw, prtad, MDIO_CTRL1,
+			MDIO_MMD_PHYXS, MDIO_CTRL1_RESET);
+	if (ret) {
+		LOG_ERROR("phy reset failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	/* Poll: the reset bit self-clears when the PHY is done. */
+	for (i = 0; i < 30; i++) {
+		msleep(100);
+		ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_CTRL1,
+				MDIO_MMD_PHYXS, &ctrl);
+		if (ret)
+			goto l_end;
+
+		if (!(ctrl & MDIO_CTRL1_RESET)) {
+			udelay(2);
+			break;
+		}
+	}
+
+	if (ctrl & MDIO_CTRL1_RESET) {
+		LOG_DEV_ERR("phy reset polling failed to complete\n");
+		return -SXE_ERR_PHY_RESET_FAIL;
+	}
+
+l_end:
+	return ret;
+}
+
+/* PHY MDIO ops table, compiled only under SXE_PHY_CONFIGURE. */
+static const struct sxe_phy_operations sxe_phy_hw_ops = {
+	.reg_write = sxe_hw_phy_reg_write,
+	.reg_read = sxe_hw_phy_reg_read,
+	.identifier_get = sxe_hw_phy_id_get,
+	.link_cap_get = sxe_hw_phy_link_cap_get,
+	.reset = sxe_hw_phy_ctrl_reset,
+};
+#endif
+
+/* Wire every static ops table into the hw abstraction structure. */
+void sxe_hw_ops_init(struct sxe_hw *hw)
+{
+	hw->setup.ops = &sxe_setup_ops;
+	hw->irq.ops = &sxe_irq_ops;
+	hw->mac.ops = &sxe_mac_ops;
+	hw->dbu.ops = &sxe_dbu_ops;
+	hw->dma.ops = &sxe_dma_ops;
+	hw->sec.ops = &sxe_sec_ops;
+	hw->stat.ops = &sxe_stat_ops;
+	hw->mbx.ops = &sxe_mbx_ops;
+	hw->pcie.ops = &sxe_pcie_ops;
+	hw->hdc.ops = &sxe_hdc_ops;
+#ifdef SXE_PHY_CONFIGURE
+	hw->phy.ops = &sxe_phy_hw_ops;
+#endif
+
+	hw->filter.mac.ops = &sxe_filter_mac_ops;
+	hw->filter.vlan.ops = &sxe_filter_vlan_ops;
+}
+
+/* Return RSS key dword @reg_idx, or 0 when the index is out of range. */
+u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx)
+{
+	if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES)
+		return 0;
+
+	return SXE_REG_READ(hw, SXE_RSSRK(reg_idx));
+}
+
+/* Report whether the RSS enable bit is set in SXE_MRQC. */
+bool sxe_hw_is_rss_enabled(struct sxe_hw *hw)
+{
+	u32 mrqc_val = SXE_REG_READ(hw, SXE_MRQC);
+
+	return (mrqc_val & SXE_MRQC_RSSEN) != 0;
+}
+
+/* Read the multiple-receive-queues control register (SXE_MRQC). */
+static u32 sxe_hw_mrqc_reg_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_MRQC);
+}
+
+/* Return the RSS hash-field selection bits from SXE_MRQC. */
+u32 sxe_hw_rss_field_get(struct sxe_hw *hw)
+{
+	u32 mrqc = sxe_hw_mrqc_reg_get(hw);
+	return (mrqc & SXE_RSS_FIELD_MASK);
+}
+
+#ifdef SXE_DPDK
+
+#define SXE_TRAFFIC_CLASS_MAX 8
+
+#define SXE_MR_VLAN_MSB_REG_OFFSET 4
+#define SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET 4
+
+#define SXE_MR_TYPE_MASK 0x0F
+#define SXE_MR_DST_POOL_OFFSET 8
+
+/* Set or clear the keep-CRC bit in SXE_CRC_STRIP_REG. */
+void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc)
+{
+	u32 crc_cfg = SXE_REG_READ(hw, SXE_CRC_STRIP_REG);
+
+	if (keep_crc)
+		crc_cfg |= SXE_KEEP_CRC_EN;
+	else
+		crc_cfg &= ~SXE_KEEP_CRC_EN;
+
+	SXE_REG_WRITE(hw, SXE_CRC_STRIP_REG, crc_cfg);
+}
+
+/*
+ * Program the rx packet buffer size for traffic class @tc_idx.
+ * The rx buffer is switched off around the write and re-enabled after.
+ */
+void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize)
+{
+	u32 rxpbsize = pbsize << SXE_RX_PKT_BUF_SIZE_SHIFT;
+
+	sxe_hw_rx_pkt_buf_switch(hw, false);
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(tc_idx), rxpbsize);
+	sxe_hw_rx_pkt_buf_switch(hw, true);
+
+}
+
+/*
+ * Configure combined DCB + VMDq multi-queue mode: split the rx packet
+ * buffer evenly across the traffic classes implied by @num_pools, zero
+ * the unused TC buffers, and select the matching MRQC mode.
+ */
+void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools)
+{
+	u16 pbsize;
+	u8 i, nb_tcs;
+	u32 mrqc;
+
+	nb_tcs = SXE_VMDQ_DCB_NUM_QUEUES / num_pools;
+
+	/* NOTE(review): the u8 cast assumes the per-TC size fits in 8 bits
+	 * - confirm SXE_RX_PKT_BUF_SIZE / nb_tcs never exceeds 255.
+	 */
+	pbsize = (u8)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
+
+	for (i = 0; i < nb_tcs; i++)
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+
+	/* 16 pools -> 8 TCs per pool, otherwise 4 TCs. */
+	mrqc = (num_pools == RTE_ETH_16_POOLS) ?
+		SXE_MRQC_VMDQRT8TCEN : SXE_MRQC_VMDQRT4TCEN;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+	SXE_REG_WRITE(hw, SXE_RTRPCS, SXE_RTRPCS_RRM);
+
+}
+
+/* General control/status registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_general_group[] = {
+	{SXE_CTRL, 1, 1, "SXE_CTRL"},
+	{SXE_STATUS, 1, 1, "SXE_STATUS"},
+	{SXE_CTRL_EXT, 1, 1, "SXE_CTRL_EXT"},
+	{0, 0, 0, ""}
+};
+
+/* Interrupt registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_interrupt_group[] = {
+	{SXE_EICS, 1, 1, "SXE_EICS"},
+	{SXE_EIMS, 1, 1, "SXE_EIMS"},
+	{SXE_EIMC, 1, 1, "SXE_EIMC"},
+	{SXE_EIAC, 1, 1, "SXE_EIAC"},
+	{SXE_EIAM, 1, 1, "SXE_EIAM"},
+	{SXE_EITR(0), 24, 4, "SXE_EITR"},
+	{SXE_IVAR(0), 24, 4, "SXE_IVAR"},
+	{SXE_GPIE, 1, 1, "SXE_GPIE"},
+	{0, 0, 0, ""}
+};
+
+/* Flow-control registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_fctl_group[] = {
+	{SXE_PFCTOP, 1, 1, "SXE_PFCTOP"},
+	{SXE_FCRTV, 1, 1, "SXE_FCRTV"},
+	{SXE_TFCS, 1, 1, "SXE_TFCS"},
+	{0, 0, 0, ""}
+};
+
+/* Rx DMA ring registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_rxdma_group[] = {
+	{SXE_RDBAL(0), 64, 0x40, "SXE_RDBAL"},
+	{SXE_RDBAH(0), 64, 0x40, "SXE_RDBAH"},
+	{SXE_RDLEN(0), 64, 0x40, "SXE_RDLEN"},
+	{SXE_RDH(0), 64, 0x40, "SXE_RDH"},
+	{SXE_RDT(0), 64, 0x40, "SXE_RDT"},
+	{SXE_RXDCTL(0), 64, 0x40, "SXE_RXDCTL"},
+	{SXE_SRRCTL(0), 16, 0x4, "SXE_SRRCTL"},
+	{SXE_TPH_RXCTRL(0), 16, 4, "SXE_TPH_RXCTRL"},
+	{SXE_RDRXCTL, 1, 1, "SXE_RDRXCTL"},
+	{SXE_RXPBSIZE(0), 8, 4, "SXE_RXPBSIZE"},
+	{SXE_RXCTRL, 1, 1, "SXE_RXCTRL"},
+	{0, 0, 0, ""}
+};
+
+/* Rx filtering/config registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_rx_group[] = {
+	{SXE_RXCSUM, 1, 1, "SXE_RXCSUM"},
+	{SXE_RFCTL, 1, 1, "SXE_RFCTL"},
+	{SXE_RAL(0), 16, 8, "SXE_RAL"},
+	{SXE_RAH(0), 16, 8, "SXE_RAH"},
+	{SXE_PSRTYPE(0), 1, 4, "SXE_PSRTYPE"},
+	{SXE_FCTRL, 1, 1, "SXE_FCTRL"},
+	{SXE_VLNCTRL, 1, 1, "SXE_VLNCTRL"},
+	{SXE_MCSTCTRL, 1, 1, "SXE_MCSTCTRL"},
+	{SXE_MRQC, 1, 1, "SXE_MRQC"},
+	{SXE_VMD_CTL, 1, 1, "SXE_VMD_CTL"},
+
+	{0, 0, 0, ""}
+};
+
+/* Tx ring registers for the register-dump API.
+ * Made const for consistency with the sibling tables and with the const
+ * element type of sxe_regs_group.
+ */
+static const struct sxe_reg_info sxe_regs_tx_group[] = {
+	{SXE_TDBAL(0), 32, 0x40, "SXE_TDBAL"},
+	{SXE_TDBAH(0), 32, 0x40, "SXE_TDBAH"},
+	{SXE_TDLEN(0), 32, 0x40, "SXE_TDLEN"},
+	{SXE_TDH(0), 32, 0x40, "SXE_TDH"},
+	{SXE_TDT(0), 32, 0x40, "SXE_TDT"},
+	{SXE_TXDCTL(0), 32, 0x40, "SXE_TXDCTL"},
+	{SXE_TPH_TXCTRL(0), 16, 4, "SXE_TPH_TXCTRL"},
+	{SXE_TXPBSIZE(0), 8, 4, "SXE_TXPBSIZE"},
+	{0, 0, 0, ""}
+};
+
+/* Wake-on-LAN registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_wakeup_group[] = {
+	{SXE_WUC, 1, 1, "SXE_WUC"},
+	{SXE_WUFC, 1, 1, "SXE_WUFC"},
+	{SXE_WUS, 1, 1, "SXE_WUS"},
+	{0, 0, 0, ""}
+};
+
+/* DCB registers for the register-dump API (currently empty). */
+static const struct sxe_reg_info sxe_regs_dcb_group[] = {
+	{0, 0, 0, ""}
+};
+
+/* Diagnostic registers for the register-dump API. */
+static const struct sxe_reg_info sxe_regs_diagnostic_group[] = {
+
+	{SXE_MFLCN, 1, 1, "SXE_MFLCN"},
+	{0, 0, 0, ""},
+};
+
+/* NULL-terminated list of every register-dump group. */
+static const struct sxe_reg_info *sxe_regs_group[] = {
+	sxe_regs_general_group,
+	sxe_regs_interrupt_group,
+	sxe_regs_fctl_group,
+	sxe_regs_rxdma_group,
+	sxe_regs_rx_group,
+	sxe_regs_tx_group,
+	sxe_regs_wakeup_group,
+	sxe_regs_dcb_group,
+	sxe_regs_diagnostic_group,
+	NULL};
+
+/*
+ * Sum the register counts of one dump group (terminated by a zero-count
+ * sentinel entry).
+ */
+static u32 sxe_regs_group_count(const struct sxe_reg_info *regs)
+{
+	int i = 0;
+	int count = 0;
+
+	while (regs[i].count)
+		count += regs[i++].count;
+
+	return count;
+}
+
+/*
+ * Read every register of one dump group into @reg_buf and return the
+ * number of dwords written.
+ */
+static u32 sxe_hw_regs_group_read(struct sxe_hw *hw,
+				const struct sxe_reg_info *regs,
+				u32 *reg_buf)
+{
+	u32 j, i = 0;
+	int count = 0;
+
+	while (regs[i].count) {
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXE_REG_READ(hw,
+				regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		i++;
+		count += j;
+	}
+
+	return count;
+}
+
+/* Count the total number of register entries across every dump group. */
+u32 sxe_hw_all_regs_group_num_get(void)
+{
+	const struct sxe_reg_info **group;
+	u32 total = 0;
+
+	for (group = sxe_regs_group; *group; group++)
+		total += sxe_regs_group_count(*group);
+
+	return total;
+}
+
+/* Dump every register group into 'data' (caller sizes the buffer via
+ * sxe_hw_all_regs_group_num_get()).
+ */
+void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data)
+{
+	u32 total = 0;
+	const struct sxe_reg_info **group;
+
+	for (group = sxe_regs_group; *group != NULL; group++)
+		total += sxe_hw_regs_group_read(hw, *group, &data[total]);
+
+	LOG_INFO("read regs cnt=%u, regs num=%u\n",
+		total, sxe_hw_all_regs_group_num_get());
+}
+
+/* Program VT_CTL: enable virtualization and either steer untagged /
+ * unmatched traffic to the given default pool or drop it.
+ */
+static void sxe_hw_default_pool_configure(struct sxe_hw *hw,
+					u8 default_pool_enabled,
+					u8 default_pool_idx)
+{
+	u32 vt_ctl = SXE_VT_CTL_VT_ENABLE | SXE_VT_CTL_REPLEN;
+
+	vt_ctl |= default_pool_enabled ?
+		((u32)default_pool_idx << SXE_VT_CTL_POOL_SHIFT) :
+		SXE_VT_CTL_DIS_DEFPL;
+
+	SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl);
+}
+
+/* DCB/VMDq entry point; thin wrapper around the common default-pool
+ * setup above.
+ */
+void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
+					u8 default_pool_enabled,
+					u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
+}
+
+/* Read one of the two extended interrupt-mask banks (idx 0 -> EIMS_EX(0),
+ * anything else -> EIMS_EX(1)).
+ */
+u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx)
+{
+	u8 bank = (idx == 0) ? 0 : 1;
+
+	return SXE_REG_READ(hw, SXE_EIMS_EX(bank));
+}
+
+/* Write one of the two extended interrupt-mask banks (see the getter). */
+void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	u8 bank = (idx == 0) ? 0 : 1;
+
+	SXE_REG_WRITE(hw, SXE_EIMS_EX(bank), value);
+}
+
+/* Build the user-priority -> traffic-class map: one 3-bit TC index per
+ * priority, packed into RTRUP2TC.
+ */
+void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
+				u8 *tc_arr)
+{
+	u32 up2tc = 0;
+	u8 prio;
+
+	for (prio = 0; prio < MAX_USER_PRIORITY; prio++)
+		up2tc |= (u32)(tc_arr[prio] & 0x07) << (prio * 3);
+
+	SXE_REG_WRITE(hw, SXE_RTRUP2TC, up2tc);
+}
+
+/* Read one 32-bit word of the unicast hash table (UTA). */
+u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_UTA(reg_idx));
+}
+
+/* Write one 32-bit word of the unicast hash table (UTA). */
+void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
+				u8 reg_idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_UTA(reg_idx), value);
+
+}
+
+/* Raw accessors for the VLAN control register (VLNCTRL). */
+u32 sxe_hw_vlan_type_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_VLNCTRL);
+}
+
+void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, value);
+}
+
+/* DCB+VMDq VLAN setup: enable VLAN filtering, accept every VLAN ID,
+ * enable Rx on all pools and let RAR entry 0 match every pool.
+ */
+void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
+				u8 num_pools)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	/* Set every bit in the VLAN filter table: all VLAN IDs pass. */
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+
+	/* Enable Rx for the first 16 or 32 pools. */
+	SXE_REG_WRITE(hw, SXE_VFRE(0),
+			num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/* RAR 0 (default MAC) matches every pool. */
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+}
+
+/* Set the outer (extended) VLAN ethertype register (EXVET). */
+void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_EXVET, value);
+}
+
+/* Raw accessors for DMATXCTL, which carries the Tx VLAN tag type. */
+u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_DMATXCTL);
+}
+
+void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_DMATXCTL, value);
+}
+
+/* Raw accessors for CTRL_EXT, which carries the extended-VLAN enable. */
+u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw)
+{
+	return SXE_REG_READ(hw, SXE_CTRL_EXT);
+}
+
+void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_CTRL_EXT, value);
+}
+
+/* Map Rx queues to a statistics counter slot (RQSMR). */
+void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_RQSMR(idx), value);
+}
+
+/* Bind a VLAN ID to a pool bitmap in the VLVF/VLVFB pair.
+ * NOTE(review): only VLVFB(pool_idx * 2) is written, so only the low
+ * 32 bits of pools_map reach hardware here — confirm whether pools
+ * 32..63 are intentionally unsupported in the DCB/VMDq path.
+ */
+void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
+				u8 pool_idx, u16 vlan_id,
+				u64 pools_map)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN |
+			(vlan_id & 0xFFF)));
+
+	SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), pools_map);
+
+}
+
+/* Map Tx queues to a statistics counter slot (TQSM). */
+void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value)
+{
+	SXE_REG_WRITE(hw, SXE_TQSM(idx), value);
+}
+
+/* Configure the Rx side for DCB: select the 4-TC/8-TC MRQC mode
+ * (with or without virtualization), program queue-drop enables, and
+ * open the VLAN filter. The Rx packet-plane arbiter is disabled while
+ * MRQC is changed and re-enabled at the end — keep this ordering.
+ */
+void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
+				u8 sriov_active, u8 tc_num)
+{
+	u32 reg;
+	u32 vlanctrl;
+	u8 i;
+	u32 q;
+
+	/* Disable the Rx arbiter before touching MRQC. */
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+	reg = SXE_REG_READ(hw, SXE_MRQC);
+	if (tc_num == 4) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT4TCEN;
+		} else {
+			/* No virtualization: clear VT_CTL and use RSS+4TC. */
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS4TCEN;
+		}
+	}
+
+	if (tc_num == 8) {
+		if (is_vt_on) {
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_VMDQRT8TCEN;
+		} else {
+			SXE_REG_WRITE(hw, SXE_VT_CTL, 0);
+			reg = (reg & ~SXE_MRQC_MRQE_MASK) |
+				SXE_MRQC_RTRSS8TCEN;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRQC, reg);
+
+	/* Queue-drop: off for all queues without SR-IOV, on with it. */
+	if (sriov_active == 0) {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				(q << SXE_QDE_IDX_SHIFT)));
+		}
+	} else {
+		for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) {
+			SXE_REG_WRITE(hw, SXE_QDE,
+				(SXE_QDE_WRITE |
+				(q << SXE_QDE_IDX_SHIFT) |
+				SXE_QDE_ENABLE));
+		}
+	}
+
+	/* Enable VLAN filtering and accept all VLAN IDs. */
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+
+	/* Re-enable the Rx arbiter. */
+	reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC;
+	SXE_REG_WRITE(hw, SXE_RTRPCS, reg);
+
+}
+
+/* Report whether Rx and Tx flow control (link-level or per-priority)
+ * are currently enabled, from FLCTRL.
+ */
+void sxe_hw_fc_status_get(struct sxe_hw *hw,
+				bool *rx_pause_on, bool *tx_pause_on)
+{
+	u32 flctrl = SXE_REG_READ(hw, SXE_FLCTRL);
+
+	*rx_pause_on = (flctrl &
+		(SXE_FCTRL_RFCE_PFC_EN | SXE_FCTRL_RFCE_LFC_EN)) != 0;
+	*tx_pause_on = (flctrl &
+		(SXE_FCTRL_TFCE_PFC_EN | SXE_FCTRL_TFCE_LFC_EN)) != 0;
+}
+
+/* Initialize the software flow-control state to defaults: FC off,
+ * default pause time and per-TC watermarks, XON frames enabled.
+ */
+void sxe_hw_fc_base_init(struct sxe_hw *hw)
+{
+	u8 tc;
+
+	hw->fc.requested_mode = SXE_FC_NONE;
+	hw->fc.current_mode = SXE_FC_NONE;
+	hw->fc.pause_time = SXE_DEFAULT_FCPAUSE;
+	hw->fc.disable_fc_autoneg = false;
+	hw->fc.send_xon = 1;
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		hw->fc.low_water[tc] = SXE_FC_DEFAULT_LOW_WATER_MARK;
+		hw->fc.high_water[tc] = SXE_FC_DEFAULT_HIGH_WATER_MARK;
+	}
+}
+
+/* Getters/setters for the cached software flow-control state; none of
+ * these touch hardware registers.
+ */
+u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	return hw->fc.high_water[tc_idx];
+}
+
+u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx)
+{
+	return hw->fc.low_water[tc_idx];
+}
+
+u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw)
+{
+	return hw->fc.send_xon;
+}
+
+void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon)
+{
+	hw->fc.send_xon = send_xon;
+}
+
+u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw)
+{
+	return hw->fc.pause_time;
+}
+
+void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time)
+{
+	hw->fc.pause_time = pause_time;
+}
+
+/* Configure the Tx side for DCB: select the 4-TC or 8-TC MTQC mode
+ * (optionally with virtualization). The Tx descriptor-plane arbiter
+ * is disabled while MTQC changes, as the hardware requires.
+ */
+void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num)
+{
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg |= SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+	if (tc_num == 8)
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ;
+	else
+		reg = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ;
+
+	if (is_vt_on)
+		reg |= SXE_MTQC_VT_ENA;
+
+	SXE_REG_WRITE(hw, SXE_MTQC, reg);
+
+	reg = SXE_REG_READ(hw, SXE_RTTDCS);
+	reg &= ~SXE_RTTDCS_ARBDIS;
+	SXE_REG_WRITE(hw, SXE_RTTDCS, reg);
+
+
+}
+
+/* Toggle the Rx IP payload checksum enable bit in RXCSUM. */
+void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
+					bool is_on)
+{
+	u32 rxcsum = SXE_REG_READ(hw, SXE_RXCSUM);
+
+	rxcsum &= ~SXE_RXCSUM_IPPCSE;
+	if (is_on)
+		rxcsum |= SXE_RXCSUM_IPPCSE;
+
+	SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum);
+}
+
+/* Toggle the global RSS enable bit in MRQC. */
+void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_MRQC_RSSEN;
+	if (is_on)
+		mrqc |= SXE_MRQC_RSSEN;
+
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+}
+
+/* Enable transmit for the first 16 or 32 pools in one VFTE bank. */
+void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num)
+{
+	SXE_REG_WRITE(hw, SXE_VFTE(reg_idx),
+			pool_num == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+}
+
+/* Replace the RSS hash-field selection bits of MRQC, leaving the
+ * queue-mode bits untouched.
+ */
+void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXE_REG_READ(hw, SXE_MRQC);
+
+	mrqc &= ~SXE_RSS_FIELD_MASK;
+	mrqc |= rss_field;
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+}
+
+/* Statistics mapping for 4-TC DCB without VMDq: spread the RQSMR/TQSM
+ * counter slots across the four traffic classes.
+ */
+static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw)
+{
+	u32 reg;
+	u8 i;
+
+	/* Only the first 4 queues of each block of 8 are used in 4-TC mode. */
+	for (i = 0; i < 32; i++) {
+		if (i % 8 > 3)
+			continue;
+
+		reg = 0x01010101 * (i / 8);
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), reg);
+	}
+	/* Tx queues are split 64/32/16/16 across TC0..TC3. */
+	for (i = 0; i < 32; i++) {
+		if (i < 16)
+			reg = 0x00000000;
+		else if (i < 24)
+			reg = 0x01010101;
+		else if (i < 28)
+			reg = 0x02020202;
+		else
+			reg = 0x03030303;
+
+		SXE_REG_WRITE(hw, SXE_TQSM(i), reg);
+	}
+
+}
+
+/* Statistics mapping for 4-TC DCB with VMDq: each group of four queues
+ * maps one-to-one onto counter slots 0..3.
+ */
+static void sxe_hw_dcb_4tc_vmdq_on_stats_configure(struct sxe_hw *hw)
+{
+	u8 i;
+
+	for (i = 0; i < 32; i++)
+		SXE_REG_WRITE(hw, SXE_RQSMR(i), 0x03020100);
+
+
+	for (i = 0; i < 32; i++)
+		SXE_REG_WRITE(hw, SXE_TQSM(i), 0x03020100);
+
+}
+
+/*
+ * Write one RSS redirection-table entry.
+ * Fix: C forbids 'return <void expression>;' in a void function
+ * (C11 6.8.6.4); call the helper and fall off the end instead.
+ */
+void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
+				u16 reg_idx, u32 value)
+{
+	sxe_hw_rss_redir_tbl_reg_write(hw, reg_idx, value);
+}
+
+/* Read the 32-bit RETA word holding the given 4-entry group.
+ * NOTE(review): the read path shifts reg_idx by 2 while the write path
+ * passes reg_idx straight to the reg-write helper — confirm the two
+ * index conventions match.
+ */
+static u32 sxe_hw_rss_redir_tbl_reg_read(struct sxe_hw *hw, u16 reg_idx)
+{
+	return SXE_REG_READ(hw, SXE_RETA(reg_idx >> 2));
+}
+
+u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx)
+{
+	return sxe_hw_rss_redir_tbl_reg_read(hw, reg_idx);
+}
+
+/* Stop the PTP clock by zeroing its increment register. */
+void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TIMINC, 0);
+}
+
+/* Pick the statistics mapping that matches the DCB TC count and VMDq
+ * state. (8-TC with VMDq has no dedicated mapping and is left as-is.)
+ */
+void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
+				u8 tc_num, bool vmdq_active)
+{
+	if (!vmdq_active) {
+		if (tc_num == 8)
+			sxe_hw_dcb_8tc_vmdq_off_stats_configure(hw);
+		else if (tc_num == 4)
+			sxe_hw_dcb_4tc_vmdq_off_stats_configure(hw);
+	} else if (tc_num == 4) {
+		sxe_hw_dcb_4tc_vmdq_on_stats_configure(hw);
+	}
+}
+
+/* Disable both Tx and Rx PTP timestamping and flush the writes. */
+void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_TSYNCTXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCTXCTL) &
+			~SXE_TSYNCTXCTL_TEN));
+
+	SXE_REG_WRITE(hw, SXE_TSYNCRXCTL,
+			(SXE_REG_READ(hw, SXE_TSYNCRXCTL) &
+			~SXE_TSYNCRXCTL_REN));
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/*
+ * Clear the pool-select (MPSAR) bitmap of one receive-address entry.
+ * Fix: valid indices are 0..SXE_UC_ENTRY_NUM_MAX-1, so the bound
+ * check must be '>=' — the old '>' let index SXE_UC_ENTRY_NUM_MAX
+ * through, one past the last entry.
+ */
+void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx)
+{
+	struct sxe_adapter *adapter = hw->adapter;
+
+	if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) {
+		LOG_ERROR_BDF("rar_idx:%d invalid.(err:%d)\n",
+			  rar_idx, SXE_ERR_PARAM);
+		return;
+	}
+
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0);
+}
+
+/* Select plain VMDq multi-queue mode in MRQC (no RSS/DCB combination). */
+void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw)
+{
+	SXE_REG_WRITE(hw, SXE_MRQC, SXE_MRQC_VMDQEN);
+}
+
+/* VMDq entry point; thin wrapper around the common default-pool setup. */
+void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
+					u8 default_pool_enabled,
+					u8 default_pool_idx)
+{
+	sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx);
+}
+
+/* VMDq VLAN setup: enable VLAN filtering, accept every VLAN ID, enable
+ * Rx for all pools, apply the given Rx mode per pool and let RAR 0
+ * match every pool.
+ */
+void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
+				u8 num_pools, u32 rx_mode)
+{
+	u32 vlanctrl;
+	u8 i;
+
+	vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL);
+	vlanctrl |= SXE_VLNCTRL_VFE;
+	SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl);
+
+	/* All VLAN IDs pass the filter table. */
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
+		SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF);
+
+	SXE_REG_WRITE(hw, SXE_VFRE(0), 0xFFFFFFFF);
+	if (num_pools == RTE_ETH_64_POOLS)
+		SXE_REG_WRITE(hw, SXE_VFRE(1), 0xFFFFFFFF);
+
+	for (i = 0; i < num_pools; i++)
+		SXE_REG_WRITE(hw, SXE_VMOLR(i), rx_mode);
+
+	/* RAR 0 (default MAC) matches every pool. */
+	SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF);
+	SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Read the PCIe extended control register carrying the VT mode bits. */
+u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw)
+{
+
+	return SXE_REG_READ(hw, SXE_GCR_EXT);
+}
+
+/* Per-TC Rx flow-control thresholds: low watermark 0, high watermark
+ * just under the packet-buffer size.
+ * NOTE(review): the '-32' headroom constant is undocumented here —
+ * presumably the FCRTH units/headroom requirement; confirm against
+ * the datasheet.
+ */
+void sxe_rx_fc_threshold_set(struct sxe_hw *hw)
+{
+	u8 i;
+	u32 high;
+
+	for (i = 0; i < SXE_TRAFFIC_CLASS_MAX; i++) {
+		SXE_REG_WRITE(hw, SXE_FCRTL(i), 0);
+		high = SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 32;
+		SXE_REG_WRITE(hw, SXE_FCRTH(i), high);
+	}
+
+}
+
+/* Bind a VLAN ID to a pool bitmap: the low VLVFB word is written when
+ * only pools 0..31 are selected, otherwise the high word.
+ * NOTE(review): only one of the two VLVFB words is ever written per
+ * call, so a map spanning both halves loses its low half — confirm
+ * callers never mix pools below and above 31 in one map.
+ */
+void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
+				u8 pool_idx, u16 vlan_id,
+				u64 pools_map)
+{
+	SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN |
+			(vlan_id & SXE_RXD_VLAN_ID_MASK)));
+
+	if (((pools_map >> 32) & 0xFFFFFFFF) == 0) {
+		SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2),
+			(pools_map & 0xFFFFFFFF));
+	} else {
+		SXE_REG_WRITE(hw, SXE_VLVFB((pool_idx * 2 + 1)),
+			((pools_map >> 32) & 0xFFFFFFFF));
+	}
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Enable VMDq local switching (Tx->Rx loopback) for every pool pair. */
+void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw)
+{
+	u8 i;
+	SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN);
+	for (i = 0; i < SXE_VMTXSW_REGISTER_COUNT; i++)
+		SXE_REG_WRITE(hw, SXE_VMTXSW(i), 0xFFFFFFFF);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/*
+ * Program the Tx multi-queue mode (MTQC) for SR-IOV, VMDq or single
+ * pool operation; the Tx arbiter is disabled around the change as the
+ * hardware requires.
+ * Fix: added the missing 'break' in the switch default branch
+ * (flagged by DPDK checkpatch; behavior unchanged).
+ */
+void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
+				bool vmdq_enable, bool sriov_enable, u16 pools_num)
+{
+	u32 mtqc;
+
+	sxe_hw_dcb_arbiter_set(hw, false);
+
+	if (sriov_enable) {
+		switch (pools_num) {
+		case RTE_ETH_64_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+			break;
+		case RTE_ETH_32_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_32VF;
+			break;
+		case RTE_ETH_16_POOLS:
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_RT_ENA |
+				SXE_MTQC_8TC_8TQ;
+			break;
+		default:
+			mtqc = SXE_MTQC_64Q_1PB;
+			break;
+		}
+	} else {
+		if (vmdq_enable) {
+			u8 queue_idx;
+
+			/* Allow transmit from every pool. */
+			SXE_REG_WRITE(hw, SXE_VFTE(0), UINT32_MAX);
+			SXE_REG_WRITE(hw, SXE_VFTE(1), UINT32_MAX);
+
+			/* Clear the drop-enable bit on every queue. */
+			for (queue_idx = 0; queue_idx < SXE_HW_TXRX_RING_NUM_MAX;
+				queue_idx++) {
+				SXE_REG_WRITE(hw, SXE_QDE,
+					(SXE_QDE_WRITE |
+					(queue_idx << SXE_QDE_IDX_SHIFT)));
+			}
+
+			mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF;
+		} else {
+			mtqc = SXE_MTQC_64Q_1PB;
+		}
+	}
+
+	SXE_REG_WRITE(hw, SXE_MTQC, mtqc);
+
+	sxe_hw_dcb_arbiter_set(hw, true);
+}
+
+/* Set the drop-enable bit on every Tx/Rx queue belonging to one VF's
+ * pool.
+ * NOTE(review): the SXE_WRITE_FLUSH sits before the QDE write rather
+ * than after it — looks inverted; confirm the intended ordering.
+ */
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+					u8 ring_per_pool)
+{
+	u32 value;
+	u8 i;
+
+	for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) {
+		value = SXE_QDE_ENABLE | SXE_QDE_WRITE;
+		SXE_WRITE_FLUSH(hw);
+
+		value |= i << SXE_QDE_IDX_SHIFT;
+
+		SXE_REG_WRITE(hw, SXE_QDE, value);
+	}
+
+}
+
+/* Return true when virtualization (pool mode) is enabled in VT_CTL. */
+bool sxe_hw_vt_status(struct sxe_hw *hw)
+{
+	u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL);
+
+	return (vt_ctl & SXE_VMD_CTL_POOL_EN) != 0;
+}
+
+/* Enable or disable one mirror rule in MRCTL.
+ * On enable: add the mirror-type bits, keep only bits inside
+ * SXE_MR_TYPE_MASK (clearing any stale destination-pool field), then
+ * store the destination pool. On disable: clear the given type bits.
+ */
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+				u8 mirror_type, u8 dst_pool, bool on)
+{
+	u32 mr_ctl;
+
+	mr_ctl = SXE_REG_READ(hw, SXE_MRCTL(rule_id));
+
+	if (on) {
+		mr_ctl |= mirror_type;
+		mr_ctl &= SXE_MR_TYPE_MASK;
+		mr_ctl |= dst_pool << SXE_MR_DST_POOL_OFFSET;
+	} else {
+		mr_ctl &= ~(mirror_type & SXE_MR_TYPE_MASK);
+	}
+
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), mr_ctl);
+
+}
+
+/* Set the 64-bit virtual-pool mask of a mirror rule (low/high words). */
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), lsb);
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), msb);
+
+}
+
+/* Set the 64-bit VLAN mask of a mirror rule (low/high words). */
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id, u32 lsb, u32 msb)
+{
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), lsb);
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), msb);
+
+}
+
+/* Fully clear one mirror rule: control word, VLAN mask and pool mask. */
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id)
+{
+	SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), 0);
+	SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), 0);
+
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), 0);
+	SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), 0);
+
+}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+/*
+ * Program one 5-tuple filter (src/dst IP, src/dst port, protocol) into
+ * the DAQF/SAQF/SDPQF/FTQF/L34T_IMIR register set at slot
+ * filter->index, steering matches to filter->queue. Fields whose mask
+ * is zero are excluded from matching via the FTQF mask bits.
+ * Fix: the debug log string was missing the separator between
+ * "dst_ip[0x%x]" and "src_port[%u]".
+ */
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+				struct sxe_fivetuple_node_info *filter)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+	u32 ftqf, sdpqf;
+	u32 l34timir = 0;
+	u8 mask = 0xff;
+
+	i = filter->index;
+
+	sdpqf = (u32)(filter->filter_info.dst_port << SXE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->filter_info.src_port & SXE_SDPQF_SRCPORT);
+
+	ftqf = (u32)(filter->filter_info.protocol & SXE_FTQF_PROTOCOL_MASK);
+	ftqf |= (u32)((filter->filter_info.priority &
+			SXE_FTQF_PRIORITY_MASK) << SXE_FTQF_PRIORITY_SHIFT);
+
+	/* A zero mask means "don't match on this field". */
+	if (filter->filter_info.src_ip_mask == 0)
+		mask &= SXE_FTQF_SOURCE_ADDR_MASK;
+
+	if (filter->filter_info.dst_ip_mask == 0)
+		mask &= SXE_FTQF_DEST_ADDR_MASK;
+
+	if (filter->filter_info.src_port_mask == 0)
+		mask &= SXE_FTQF_SOURCE_PORT_MASK;
+
+	if (filter->filter_info.dst_port_mask == 0)
+		mask &= SXE_FTQF_DEST_PORT_MASK;
+
+	if (filter->filter_info.proto_mask == 0)
+		mask &= SXE_FTQF_PROTOCOL_COMP_MASK;
+
+	ftqf |= mask << SXE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= SXE_FTQF_POOL_MASK_EN;
+	ftqf |= SXE_FTQF_QUEUE_ENABLE;
+
+	LOG_DEBUG("add fivetuple filter, index[%u], src_ip[0x%x], dst_ip[0x%x], "
+		"src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i,
+		filter->filter_info.src_ip, filter->filter_info.dst_ip,
+		filter->filter_info.src_port, filter->filter_info.dst_port,
+		ftqf, filter->queue);
+
+	SXE_REG_WRITE(hw, SXE_DAQF(i), filter->filter_info.dst_ip);
+	SXE_REG_WRITE(hw, SXE_SAQF(i), filter->filter_info.src_ip);
+	SXE_REG_WRITE(hw, SXE_SDPQF(i), sdpqf);
+	SXE_REG_WRITE(hw, SXE_FTQF(i), ftqf);
+
+	l34timir |= SXE_L34T_IMIR_RESERVE;
+	l34timir |= (u32)(filter->queue << SXE_L34T_IMIR_QUEUE_SHIFT);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(i), l34timir);
+}
+
+/* Clear every register of one 5-tuple filter slot. */
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index)
+{
+	SXE_REG_WRITE(hw, SXE_DAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SAQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_SDPQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_FTQF(reg_index), 0);
+	SXE_REG_WRITE(hw, SXE_L34T_IMIR(reg_index), 0);
+
+}
+
+/* Install an ethertype filter at 'reg_index' steering matching frames
+ * to 'queue'.
+ */
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+					u8 reg_index, u16 ethertype, u16 queue)
+{
+	u32 etqf = SXE_ETQF_FILTER_EN | (u32)ethertype;
+	u32 etqs = ((u32)(queue << SXE_ETQS_RX_QUEUE_SHIFT) &
+			SXE_ETQS_RX_QUEUE) | SXE_ETQS_QUEUE_EN;
+
+	SXE_REG_WRITE(hw, SXE_ETQF(reg_index), etqf);
+	SXE_REG_WRITE(hw, SXE_ETQS(reg_index), etqs);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Clear one ethertype filter slot and flush. */
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type)
+{
+	SXE_REG_WRITE(hw, SXE_ETQF(filter_type), 0);
+	SXE_REG_WRITE(hw, SXE_ETQS(filter_type), 0);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Enable the TCP SYN filter, steering SYN packets to 'queue'; a
+ * non-zero 'priority' sets the high-priority bit.
+ */
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority)
+{
+	u32 synqf = ((u32)(queue << SXE_SYN_FILTER_QUEUE_SHIFT) &
+			SXE_SYN_FILTER_QUEUE) | SXE_SYN_FILTER_ENABLE;
+
+	if (priority)
+		synqf |= SXE_SYN_FILTER_SYNQFP;
+
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Disable the TCP SYN filter and clear its queue selection. */
+void sxe_hw_syn_filter_del(struct sxe_hw *hw)
+{
+	u32 synqf;
+
+	synqf = SXE_REG_READ(hw, SXE_SYNQF);
+
+	synqf &= ~(SXE_SYN_FILTER_QUEUE | SXE_SYN_FILTER_ENABLE);
+	SXE_REG_WRITE(hw, SXE_SYNQF, synqf);
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/*
+ * Shrink Rx packet buffer 0 by 'pbsize' (space reclaimed for the flow
+ * navigator) and zero the remaining seven packet buffers.
+ * Fix: loop index type 'S32' -> 's32' for consistency with the rest
+ * of the file (e.g. sxe_hw_fnav_flex_offset_set).
+ */
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize)
+{
+	s32 i;
+
+	SXE_REG_WRITE(hw, SXE_RXPBSIZE(0), (SXE_REG_READ(hw, SXE_RXPBSIZE(0)) - pbsize));
+	for (i = 1; i < 8; i++)
+		SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0);
+}
+
+/* Update the flow-navigator flex-byte mask bit.
+ * NOTE(review): only an all-ones flex_mask ("don't compare") clears
+ * the FNAVM_FLEX bit; every other value writes FNAVM back unchanged —
+ * confirm partial flex masks are intentionally unsupported.
+ */
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask)
+{
+	u32 fnavm;
+
+	fnavm = SXE_REG_READ(hw, SXE_FNAVM);
+	if (flex_mask == UINT16_MAX)
+		fnavm &= ~SXE_FNAVM_FLEX;
+
+	SXE_REG_WRITE(hw, SXE_FNAVM, fnavm);
+}
+
+/* Program the flow-navigator IPv6 source/destination masks; the
+ * hardware register takes the inverted mask.
+ */
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask)
+{
+	u32 fnavipv6m = ((u32)dst_mask << 16) | src_mask;
+
+	SXE_REG_WRITE(hw, SXE_FNAVIP6M, ~fnavipv6m);
+}
+
+/*
+ * Set the flow-navigator flexible-byte offset (in 16-bit words) and
+ * wait for the re-initialization to complete.
+ * Returns 0 on success or the wait helper's error code.
+ * Style fix: dropped braces around the single-statement error path
+ * (DPDK coding style).
+ */
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset)
+{
+	u32 fnavctrl;
+	s32 ret;
+
+	fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL);
+	fnavctrl &= ~SXE_FNAVCTRL_FLEX_MASK;
+	fnavctrl |= ((offset >> 1)
+		<< SXE_FNAVCTRL_FLEX_SHIFT);
+
+	SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl);
+	SXE_WRITE_FLUSH(hw);
+
+	ret = sxe_hw_fnav_wait_init_done(hw);
+	if (ret)
+		LOG_ERROR("flow director signature poll time exceeded!\n");
+
+	return ret;
+}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+/*
+ * Quiesce the MACsec datapath: disable the Tx/Rx security paths and
+ * poll the ready bits. When the link is down, the internal loopback
+ * is toggled on so in-flight frames can drain.
+ * Style fix: dropped braces around the single-statement early return
+ * (DPDK coding style).
+ */
+static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link)
+{
+	u32 t_rdy, r_rdy;
+	u32 limit;
+	u32 reg;
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+		SXE_SECTXSTAT_SECTX_RDY;
+	r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+		SXE_SECRXSTAT_SECRX_RDY;
+	if (t_rdy && r_rdy)
+		return;
+
+	if (!link) {
+		/* No link: enable loopback so queued frames can drain. */
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x1);
+
+		SXE_WRITE_FLUSH(hw);
+		mdelay(3);
+	}
+
+	limit = 20;
+	do {
+		mdelay(10);
+		t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) &
+			SXE_SECTXSTAT_SECTX_RDY;
+		r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) &
+			SXE_SECRXSTAT_SECRX_RDY;
+	} while (!(t_rdy && r_rdy) && limit--);
+
+	if (!link) {
+		SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x0);
+		SXE_WRITE_FLUSH(hw);
+	}
+}
+/* Write the Rx multi-queue mode register (MRQC) directly.
+ * NOTE(review): placed inside the MACSEC-only #if block although the
+ * function itself is generic — confirm its callers are macsec-only.
+ */
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc)
+{
+	SXE_REG_WRITE(hw, SXE_MRQC, mrqc);
+
+}
+
+/* Bring up MACsec: quiesce the datapath, configure the Tx/Rx security
+ * engines and LinkSec control (Tx mode, Rx mode, packet-number
+ * threshold), then re-enable the data paths. The register sequence is
+ * order-sensitive; keep it as written.
+ */
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+				u32 rx_mode, u32 pn_trh)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	/* Tx buffer almost-full threshold. */
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	/* Minimum inter-frame gap: low nibble set to 3. */
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x3;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_SECRX_DIS;
+	reg |= SXE_SECRXCTRL_RP;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	reg = tx_mode & SXE_LSECTXCTRL_EN_MASK;
+	reg |= SXE_LSECTXCTRL_AISCI;
+	reg &= ~SXE_LSECTXCTRL_PNTHRSH_MASK;
+	reg |= (pn_trh << SXE_LSECTXCTRL_PNTHRSH_SHIFT);
+	SXE_REG_WRITE(hw, SXE_LSECTXCTRL, reg);
+
+	reg = (rx_mode << SXE_LSECRXCTRL_EN_SHIFT) & SXE_LSECRXCTRL_EN_MASK;
+	reg |= SXE_LSECRXCTRL_RP;
+	reg |= SXE_LSECRXCTRL_DROP_EN;
+	SXE_REG_WRITE(hw, SXE_LSECRXCTRL, reg);
+
+	/* Re-enable the Tx/Rx data paths stopped above. */
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg &= ~SXE_SECTXCTRL_TX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg &= ~SXE_SECRXCTRL_RX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Tear down MACsec: quiesce the datapath, disable both security
+ * engines and restore the non-MACsec inter-frame gap.
+ */
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up)
+{
+	u32 reg;
+
+	sxe_macsec_stop_data(hw, is_up);
+
+	reg = SXE_REG_READ(hw, SXE_SECTXCTRL);
+	reg |= SXE_SECTXCTRL_SECTX_DIS;
+	reg &= ~SXE_SECTXCTRL_STORE_FORWARD;
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg);
+
+	reg = SXE_REG_READ(hw, SXE_SECRXCTRL);
+	reg |= SXE_SECRXCTRL_SECRX_DIS;
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250);
+
+	/* Minimum inter-frame gap: low nibble restored to 1. */
+	reg = SXE_REG_READ(hw, SXE_SECTXMINIFG);
+	reg = (reg & 0xfffffff0) | 0x1;
+	SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg);
+
+	SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS);
+	SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program the Tx secure-channel identifier (SCI low/high words). */
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch)
+{
+	SXE_REG_WRITE(hw, SXE_LSECTXSCL, scl);
+	SXE_REG_WRITE(hw, SXE_LSECTXSCH, sch);
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program the Rx secure-channel identifier; the port identifier is
+ * merged into the high word.
+ */
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi)
+{
+	u32 reg = sch;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSCL, scl);
+
+	reg |= (pi << SXE_LSECRXSCH_PI_SHIFT) & SXE_LSECRXSCH_PI_MASK;
+	SXE_REG_WRITE(hw, SXE_LSECRXSCH, reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+}
+
+/* Program one Tx security association (index 0 or 1): select the SA
+ * slot, load its packet number and 128-bit key, then set its
+ * association number and flip the active-SA select bit accordingly.
+ */
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+					u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	reg &= ~SXE_LSECTXSA_SELSA;
+	reg |= (sa_idx << SXE_LSECTXSA_SELSA_SHIFT) & SXE_LSECTXSA_SELSA;
+	SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECTXPN(sa_idx), pn);
+	/* 128-bit key as four 32-bit words. */
+	for (i = 0; i < 4; i++)
+		SXE_REG_WRITE(hw, SXE_LSECTXKEY(sa_idx, i), keys[i]);
+
+	SXE_WRITE_FLUSH(hw);
+
+	reg = SXE_REG_READ(hw, SXE_LSECTXSA);
+	if (sa_idx == 0) {
+		reg &= ~SXE_LSECTXSA_AN0_MASK;
+		reg |= (an << SXE_LSECTXSA_AN0_SHIFT) & SXE_LSECTXSA_AN0_MASK;
+		reg &= ~SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	} else if (sa_idx == 1) {
+		reg &= ~SXE_LSECTXSA_AN1_MASK;
+		reg |= (an << SXE_LSECTXSA_AN1_SHIFT) & SXE_LSECTXSA_AN1_MASK;
+		reg |= SXE_LSECTXSA_SELSA;
+		SXE_REG_WRITE(hw, SXE_LSECTXSA, reg);
+	}
+
+	SXE_WRITE_FLUSH(hw);
+}
+
+/* Program one Rx security association: mark the SA invalid while its
+ * packet number and 128-bit key are loaded, then re-validate it with
+ * the given association number.
+ */
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+					u8 an, u32 pn, u32 *keys)
+{
+	u32 reg;
+	u8 i;
+
+	reg = SXE_REG_READ(hw, SXE_LSECRXSA(sa_idx));
+	reg &= ~SXE_LSECRXSA_SAV;
+	reg |= (0 << SXE_LSECRXSA_SAV_SHIFT) & SXE_LSECRXSA_SAV;
+
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+
+	SXE_WRITE_FLUSH(hw);
+
+	SXE_REG_WRITE(hw, SXE_LSECRXPN(sa_idx), pn);
+
+	/* 128-bit key as four 32-bit words. */
+	for (i = 0; i < 4; i++)
+		SXE_REG_WRITE(hw, SXE_LSECRXKEY(sa_idx, i), keys[i]);
+
+	SXE_WRITE_FLUSH(hw);
+
+	reg = ((an << SXE_LSECRXSA_AN_SHIFT) & SXE_LSECRXSA_AN_MASK) | SXE_LSECRXSA_SAV;
+	SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg);
+	SXE_WRITE_FLUSH(hw);
+}
+
+#endif
+#endif
new file mode 100644
@@ -0,0 +1,1525 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HW_H__
+#define __SXE_HW_H__
+
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#else
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#include <inttypes.h>
+#endif
+
+#include "sxe_regs.h"
+
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#define SXE_PRIU64 "llu"
+#define SXE_PRIX64 "llx"
+#define SXE_PRID64 "lld"
+#define SXE_RMB() rmb() /* ensure prior reads complete before the check */
+
+#else
+#define SXE_PRIU64 PRIu64
+#define SXE_PRIX64 PRIx64
+#define SXE_PRID64 PRId64
+#define SXE_RMB() rte_rmb()
+#endif
+
+struct sxe_hw;
+struct sxe_filter_mac;
+struct sxe_fc_info;
+
+#define SXE_MAC_ADDR_LEN 6
+#define SXE_QUEUE_STATS_MAP_REG_NUM 32
+
+#define SXE_FC_DEFAULT_HIGH_WATER_MARK 0x80
+#define SXE_FC_DEFAULT_LOW_WATER_MARK 0x40
+
+#define SXE_MC_ADDR_EXTRACT_MASK (0xFFF)
+#define SXE_MC_ADDR_SHIFT (5)
+#define SXE_MC_ADDR_REG_MASK (0x7F)
+#define SXE_MC_ADDR_BIT_MASK (0x1F)
+
+#define SXE_TXTS_POLL_CHECK 3
+#define SXE_TXTS_POLL 5
+#define SXE_TIME_TO_NS(ns, sec) (((u64)(ns)) + (u64)(((u64)(sec)) * NSEC_PER_SEC))
+
+/* Strict-priority arbitration scope for DCB scheduling. */
+enum sxe_strict_prio_type {
+	PRIO_NONE = 0,
+	PRIO_GROUP,
+	PRIO_LINK
+};
+
+/* Multicast hash filter type: selects which 12 address bits feed the
+ * MTA hash (values match the MCSTCTRL filter-type field).
+ */
+enum sxe_mc_filter_type {
+	SXE_MC_FILTER_TYPE0 = 0,
+	SXE_MC_FILTER_TYPE1,
+	SXE_MC_FILTER_TYPE2,
+	SXE_MC_FILTER_TYPE3
+};
+
+#define SXE_POOLS_NUM_MAX 64
+#define SXE_16_POOL 16
+#define SXE_32_POOL 32
+#define SXE_1_RING_PER_POOL 1
+#define SXE_2_RING_PER_POOL 2
+#define SXE_3_RING_PER_POOL 3
+#define SXE_4_RING_PER_POOL 4
+
+#define SXE_DCB_1_TC 1
+#define SXE_DCB_4_TC 4
+#define SXE_DCB_8_TC 8
+
+#define SXE_8Q_PER_POOL_MASK 0x78
+#define SXE_4Q_PER_POOL_MASK 0x7C
+#define SXE_2Q_PER_POOL_MASK 0x7E
+
+#define SXE_VF_NUM_16 16
+#define SXE_VF_NUM_32 32
+
+#define SXE_TX_DESC_EOP_MASK 0x01000000
+#define SXE_TX_DESC_RS_MASK 0x08000000
+#define SXE_TX_DESC_STAT_DD 0x00000001
+#define SXE_TX_DESC_CMD (SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK)
+#define SXE_TX_DESC_TYPE_DATA 0x00300000
+#define SXE_TX_DESC_DEXT 0x20000000
+#define SXE_TX_DESC_IFCS 0x02000000
+#define SXE_TX_DESC_VLE 0x40000000
+#define SXE_TX_DESC_TSTAMP 0x00080000
+#define SXE_TX_DESC_FLAGS (SXE_TX_DESC_TYPE_DATA | \
+ SXE_TX_DESC_IFCS | \
+ SXE_TX_DESC_DEXT| \
+ SXE_TX_DESC_EOP_MASK)
+#define SXE_TXD_DTYP_CTXT 0x00200000
+#define SXE_TXD_DCMD_TSE 0x80000000
+#define SXE_TXD_MAC_LINKSEC 0x00040000
+#define SXE_TXD_MAC_1588 0x00080000
+#define SXE_TX_DESC_PAYLEN_SHIFT 14
+#define SXE_TX_OUTERIPCS_SHIFT 17
+
+#define SXE_TX_POPTS_IXSM 0x01
+#define SXE_TX_POPTS_TXSM 0x02
+#define SXE_TXD_POPTS_SHIFT 8
+#define SXE_TXD_POPTS_IXSM (SXE_TX_POPTS_IXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_TXSM (SXE_TX_POPTS_TXSM << SXE_TXD_POPTS_SHIFT)
+#define SXE_TXD_POPTS_IPSEC (0x00000400)
+
+#define SXE_TX_CTXTD_DTYP_CTXT 0x00200000
+#define SXE_TX_CTXTD_TUCMD_IPV6 0x00000000
+#define SXE_TX_CTXTD_TUCMD_IPV4 0x00000400
+#define SXE_TX_CTXTD_TUCMD_L4T_UDP 0x00000000
+#define SXE_TX_CTXTD_TUCMD_L4T_TCP 0x00000800
+#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000
+#define SXE_TX_CTXTD_TUCMD_L4T_RSV 0x00001800
+#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP 0x00002000
+#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+
+#define SXE_TX_CTXTD_L4LEN_SHIFT 8
+#define SXE_TX_CTXTD_MSS_SHIFT 16
+#define SXE_TX_CTXTD_MACLEN_SHIFT 9
+#define SXE_TX_CTXTD_VLAN_SHIFT 16
+#define SXE_TX_CTXTD_VLAN_MASK 0xffff0000
+#define SXE_TX_CTXTD_MACLEN_MASK 0x0000fE00
+#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT 16
+#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT 24
+
+#define SXE_VLAN_TAG_SIZE 4
+
+#define SXE_RSS_KEY_SIZE (40)
+#define SXE_MAX_RSS_KEY_ENTRIES (10)
+#define SXE_MAX_RETA_ENTRIES (128)
+
+#define SXE_TIMINC_IV_NS_SHIFT 8
+#define SXE_TIMINC_INCPD_SHIFT 24
+#define SXE_TIMINC_SET(incpd, iv_ns, iv_sns) \
+ (((incpd) << SXE_TIMINC_INCPD_SHIFT) | \
+ ((iv_ns) << SXE_TIMINC_IV_NS_SHIFT) | (iv_sns))
+
+#define PBA_STRATEGY_EQUAL (0)
+#define PBA_STRATEGY_WEIGHTED (1)
+#define SXE_PKG_BUF_NUM_MAX (8)
+#define SXE_HW_TXRX_RING_NUM_MAX 128
+#define SXE_VMDQ_DCB_NUM_QUEUES SXE_HW_TXRX_RING_NUM_MAX
+#define SXE_RX_PKT_BUF_SIZE (512)
+
+#define SXE_UC_ENTRY_NUM_MAX 128
+#define SXE_HW_TX_NONE_MODE_Q_NUM 64
+
+#define SXE_MBX_MSG_NUM 16
+#define SXE_MBX_RETRY_INTERVAL 500
+#define SXE_MBX_RETRY_COUNT 2000
+
+#define SXE_VF_UC_ENTRY_NUM_MAX 10
+#define SXE_VF_MC_ENTRY_NUM_MAX 30
+
+#define SXE_UTA_ENTRY_NUM_MAX 128
+#define SXE_MTA_ENTRY_NUM_MAX 128
+#define SXE_HASH_UC_NUM_MAX 4096
+
+#define SXE_MAC_ADDR_EXTRACT_MASK (0xFFF)
+#define SXE_MAC_ADDR_SHIFT (5)
+#define SXE_MAC_ADDR_REG_MASK (0x7F)
+#define SXE_MAC_ADDR_BIT_MASK (0x1F)
+
+#define SXE_VFT_TBL_SIZE (128)
+#define SXE_VLAN_ID_SHIFT (5)
+#define SXE_VLAN_ID_REG_MASK (0x7F)
+#define SXE_VLAN_ID_BIT_MASK (0x1F)
+
+#define SXE_TX_PBSIZE_MAX 0x00028000
+#define SXE_TX_PKT_SIZE_MAX 0xA
+#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14
+#define SXE_RING_ENABLE_WAIT_LOOP 10
+
+#define VFTA_BLOCK_SIZE 8
+#define VF_BLOCK_BITS (32)
+#define SXE_MAX_MAC_HDR_LEN 127
+#define SXE_MAX_NETWORK_HDR_LEN 511
+/* NOTE(review): duplicate of the identical SXE_MAC_ADDR_LEN definition
+ * earlier in this header; benign (same value) but one copy could go.
+ */
+#define SXE_MAC_ADDR_LEN 6
+
+#define SXE_FNAV_BUCKET_HASH_KEY 0x3DAD14E2
+#define SXE_FNAV_SAMPLE_HASH_KEY 0x174D3614
+#define SXE_SAMPLE_COMMON_HASH_KEY \
+ (SXE_FNAV_BUCKET_HASH_KEY & SXE_FNAV_SAMPLE_HASH_KEY)
+
+#define SXE_SAMPLE_HASH_MASK 0x7fff
+#define SXE_SAMPLE_L4TYPE_MASK 0x3
+#define SXE_SAMPLE_L4TYPE_UDP 0x1
+#define SXE_SAMPLE_L4TYPE_TCP 0x2
+#define SXE_SAMPLE_L4TYPE_SCTP 0x3
+#define SXE_SAMPLE_L4TYPE_IPV6_MASK 0x4
+#define SXE_SAMPLE_L4TYPE_TUNNEL_MASK 0x10
+#define SXE_SAMPLE_FLOW_TYPE_MASK 0xF
+
+#define SXE_SAMPLE_VM_POOL_MASK 0x7F
+#define SXE_SAMPLE_VLAN_MASK 0xEFFF
+#define SXE_SAMPLE_FLEX_BYTES_MASK 0xFFFF
+
+#define SXE_FNAV_INIT_DONE_POLL 10
+#define SXE_FNAV_DROP_QUEUE 127
+
+#define MAX_TRAFFIC_CLASS 8
+#define DEF_TRAFFIC_CLASS 1
+
+#define SXE_LINK_SPEED_UNKNOWN 0
+#define SXE_LINK_SPEED_10_FULL 0x0002
+#define SXE_LINK_SPEED_100_FULL 0x0008
+#define SXE_LINK_SPEED_1GB_FULL 0x0020
+#define SXE_LINK_SPEED_10GB_FULL 0x0080
+
+typedef u32 sxe_link_speed;
+#ifdef SXE_TEST
+#define SXE_LINK_MBPS_SPEED_DEFAULT 1000
+#else
+#define SXE_LINK_MBPS_SPEED_DEFAULT 10000
+#endif
+
+#define SXE_LINK_MBPS_SPEED_MIN (10)
+
+/* IP version selector used by the RSS configuration helpers. */
+enum sxe_rss_ip_version {
+	SXE_RSS_IP_VER_4 = 4,
+	SXE_RSS_IP_VER_6 = 6,
+};
+
+/* Flow-navigator (flow director) operating mode. */
+enum sxe_fnav_mode {
+	SXE_FNAV_SAMPLE_MODE = 1,
+	SXE_FNAV_SPECIFIC_MODE = 2,
+};
+
+/* Flow-type encoding for flow-navigator sample rules; the low bits
+ * carry the L4 type and bit 2 the IPv6 flag (see the
+ * SXE_SAMPLE_L4TYPE_* masks above).
+ */
+enum sxe_sample_type {
+	SXE_SAMPLE_FLOW_TYPE_IPV4 = 0x0,
+	SXE_SAMPLE_FLOW_TYPE_UDPV4 = 0x1,
+	SXE_SAMPLE_FLOW_TYPE_TCPV4 = 0x2,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV4 = 0x3,
+	SXE_SAMPLE_FLOW_TYPE_IPV6 = 0x4,
+	SXE_SAMPLE_FLOW_TYPE_UDPV6 = 0x5,
+	SXE_SAMPLE_FLOW_TYPE_TCPV6 = 0x6,
+	SXE_SAMPLE_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+/* Result codes of the hardware self-test / diagnostic sequence. */
+enum {
+	SXE_DIAG_TEST_PASSED = 0,
+	SXE_DIAG_TEST_BLOCKED = 1,
+	SXE_DIAG_STATS_REG_TEST_ERR = 2,
+	SXE_DIAG_REG_PATTERN_TEST_ERR = 3,
+	SXE_DIAG_CHECK_REG_TEST_ERR = 4,
+	SXE_DIAG_DISABLE_IRQ_TEST_ERR = 5,
+	SXE_DIAG_ENABLE_IRQ_TEST_ERR = 6,
+	SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR = 7,
+	SXE_DIAG_TX_RING_CONFIGURE_ERR = 8,
+	SXE_DIAG_RX_RING_CONFIGURE_ERR = 9,
+	SXE_DIAG_ALLOC_SKB_ERR = 10,
+	SXE_DIAG_LOOPBACK_SEND_TEST_ERR = 11,
+	SXE_DIAG_LOOPBACK_RECV_TEST_ERR = 12,
+};
+
/* RX descriptor status bits (legacy and advanced write-back format).
 * Several values are intentionally shared between names (e.g. CRCV and
 * OUTERIPCS are both 0x100, DYNINT and LLINT both 0x800) -- presumably the
 * same bit is interpreted per context; confirm against the datasheet.
 */
#define SXE_RXD_STAT_DD 0x01
#define SXE_RXD_STAT_EOP 0x02
#define SXE_RXD_STAT_FLM 0x04
#define SXE_RXD_STAT_VP 0x08
#define SXE_RXDADV_NEXTP_MASK 0x000FFFF0
#define SXE_RXDADV_NEXTP_SHIFT 0x00000004
#define SXE_RXD_STAT_UDPCS 0x10
#define SXE_RXD_STAT_L4CS 0x20
#define SXE_RXD_STAT_IPCS 0x40
#define SXE_RXD_STAT_PIF 0x80
#define SXE_RXD_STAT_CRCV 0x100
#define SXE_RXD_STAT_OUTERIPCS 0x100
#define SXE_RXD_STAT_VEXT 0x200
#define SXE_RXD_STAT_UDPV 0x400
#define SXE_RXD_STAT_DYNINT 0x800
#define SXE_RXD_STAT_LLINT 0x800
#define SXE_RXD_STAT_TSIP 0x08000
#define SXE_RXD_STAT_TS 0x10000
#define SXE_RXD_STAT_SECP 0x20000
#define SXE_RXD_STAT_LB 0x40000
#define SXE_RXD_STAT_ACK 0x8000
/* RX descriptor error bits. */
#define SXE_RXD_ERR_CE 0x01
#define SXE_RXD_ERR_LE 0x02
#define SXE_RXD_ERR_PE 0x08
#define SXE_RXD_ERR_OSE 0x10
#define SXE_RXD_ERR_USE 0x20
#define SXE_RXD_ERR_TCPE 0x40
#define SXE_RXD_ERR_IPE 0x80
/* Advanced-descriptor error field lives in the top 12 bits. */
#define SXE_RXDADV_ERR_MASK 0xfff00000
#define SXE_RXDADV_ERR_SHIFT 20
#define SXE_RXDADV_ERR_OUTERIPER 0x04000000
#define SXE_RXDADV_ERR_FCEOFE 0x80000000
#define SXE_RXDADV_ERR_FCERR 0x00700000
#define SXE_RXDADV_ERR_FNAV_LEN 0x00100000
#define SXE_RXDADV_ERR_FNAV_DROP 0x00200000
#define SXE_RXDADV_ERR_FNAV_COLL 0x00400000
#define SXE_RXDADV_ERR_HBO 0x00800000
#define SXE_RXDADV_ERR_CE 0x01000000
#define SXE_RXDADV_ERR_LE 0x02000000
#define SXE_RXDADV_ERR_PE 0x08000000
#define SXE_RXDADV_ERR_OSE 0x10000000
/* IPsec error codes overlap PE/OSE values; context-dependent decode. */
#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000
#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000
#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
#define SXE_RXDADV_ERR_USE 0x20000000
#define SXE_RXDADV_ERR_L4E 0x40000000
#define SXE_RXDADV_ERR_IPE 0x80000000
/* VLAN tag fields within the descriptor (802.1Q: 12-bit VID, PCP, CFI). */
#define SXE_RXD_VLAN_ID_MASK 0x0FFF
#define SXE_RXD_PRI_MASK 0xE000
#define SXE_RXD_PRI_SHIFT 13
#define SXE_RXD_CFI_MASK 0x1000
#define SXE_RXD_CFI_SHIFT 12
#define SXE_RXDADV_LROCNT_MASK 0x001E0000
#define SXE_RXDADV_LROCNT_SHIFT 17

/* Advanced status bits alias the legacy status bits. */
#define SXE_RXDADV_STAT_DD SXE_RXD_STAT_DD
#define SXE_RXDADV_STAT_EOP SXE_RXD_STAT_EOP
#define SXE_RXDADV_STAT_FLM SXE_RXD_STAT_FLM
#define SXE_RXDADV_STAT_VP SXE_RXD_STAT_VP
#define SXE_RXDADV_STAT_MASK 0x000fffff
#define SXE_RXDADV_STAT_TS 0x00010000
#define SXE_RXDADV_STAT_SECP 0x00020000

/* Packet-type bits reported in the advanced RX descriptor. */
#define SXE_RXDADV_PKTTYPE_NONE 0x00000000
#define SXE_RXDADV_PKTTYPE_IPV4 0x00000010
#define SXE_RXDADV_PKTTYPE_IPV4_EX 0x00000020
#define SXE_RXDADV_PKTTYPE_IPV6 0x00000040
#define SXE_RXDADV_PKTTYPE_IPV6_EX 0x00000080
#define SXE_RXDADV_PKTTYPE_TCP 0x00000100
#define SXE_RXDADV_PKTTYPE_UDP 0x00000200
#define SXE_RXDADV_PKTTYPE_SCTP 0x00000400
/* NFS and VXLAN share 0x800 -- presumably mode-dependent; confirm. */
#define SXE_RXDADV_PKTTYPE_NFS 0x00000800
#define SXE_RXDADV_PKTTYPE_VXLAN 0x00000800
#define SXE_RXDADV_PKTTYPE_TUNNEL 0x00010000
#define SXE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000
#define SXE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000
#define SXE_RXDADV_PKTTYPE_LINKSEC 0x00004000
#define SXE_RXDADV_PKTTYPE_ETQF 0x00008000
#define SXE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070
#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4
+
/* Hardware MAC statistics counters, read from the statistics registers.
 * Field names follow the register mnemonics (prc*/ptc* = packets
 * received/transmitted by size bucket, g/b/m prefixes = good/broadcast/
 * multicast, q* arrays = per-queue, dbu* arrays = per-traffic-class
 * data-buffer counters).
 */
struct sxe_mac_stats {
	u64 crcerrs;
	u64 errbc;
	u64 rlec;
	u64 prc64;
	u64 prc127;
	u64 prc255;
	u64 prc511;
	u64 prc1023;
	u64 prc1522;
	u64 gprc;
	u64 bprc;
	u64 mprc;
	u64 gptc;
	u64 gorc;
	u64 gotc;
	u64 ruc;
	u64 rfc;
	u64 roc;
	u64 rjc;
	u64 tor;
	u64 tpr;
	u64 tpt;
	u64 ptc64;
	u64 ptc127;
	u64 ptc255;
	u64 ptc511;
	u64 ptc1023;
	u64 ptc1522;
	u64 mptc;
	u64 bptc;
	/* Per-queue packet/byte counters (16 stat register sets). */
	u64 qprc[16];
	u64 qptc[16];
	u64 qbrc[16];
	u64 qbtc[16];
	u64 qprdc[16];
	/* Per-traffic-class data-buffer-unit counters (8 TCs). */
	u64 dburxtcin[8];
	u64 dburxtcout[8];
	u64 dburxgdreecnt[8];
	u64 dburxdrofpcnt[8];
	u64 dbutxtcin[8];
	u64 dbutxtcout[8];
	u64 rxdgpc;
	u64 rxdgbc;
	u64 rxddpc;
	u64 rxddbc;
	u64 rxtpcing;
	u64 rxtpceng;
	u64 rxlpbkpc;
	u64 rxlpbkbc;
	u64 rxdlpbkpc;
	u64 rxdlpbkbc;
	u64 prddc;
	u64 txdgpc;
	u64 txdgbc;
	u64 txswerr;
	u64 txswitch;
	u64 txrepeat;
	u64 txdescerr;

	/* Flow-director (fnav) rule add/remove/match counters. */
	u64 fnavadd;
	u64 fnavrmv;
	u64 fnavadderr;
	u64 fnavrmverr;
	u64 fnavmatch;
	u64 fnavmiss;
	u64 hw_rx_no_dma_resources;
	/* Per-TC pause-frame and missed-packet counters. */
	u64 prcpf[8];
	u64 pfct[8];
	u64 mpc[8];

	/* Software-accumulated totals across register wraps. */
	u64 total_tx_pause;
	u64 total_gptc;
	u64 total_gotc;
};
+
#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
/* L4 protocol selector for the 5-tuple filters. */
enum sxe_fivetuple_protocol {
	SXE_FILTER_PROTOCOL_TCP = 0,
	SXE_FILTER_PROTOCOL_UDP,
	SXE_FILTER_PROTOCOL_SCTP,
	SXE_FILTER_PROTOCOL_NONE,
};

/* One 5-tuple filter: match fields plus per-field mask bits.
 * A set mask bit marks the corresponding field -- whether it means
 * "compare" or "ignore" is decided by the programming code; confirm there.
 */
struct sxe_fivetuple_filter_info {
	u32 src_ip;
	u32 dst_ip;
	u16 src_port;
	u16 dst_port;
	enum sxe_fivetuple_protocol protocol;
	u8 priority;
	u8 src_ip_mask:1,
	   dst_ip_mask:1,
	   src_port_mask:1,
	   dst_port_mask:1,
	   proto_mask:1;
};

/* A programmed 5-tuple filter slot: hardware index, target RX queue,
 * and the match description.
 */
struct sxe_fivetuple_node_info {
	u16 index;
	u16 queue;
	struct sxe_fivetuple_filter_info filter_info;
};
#endif

/* Flow-director rule, viewed either as named fields (network byte order)
 * or as 11 dwords for fast register programming.
 */
union sxe_fnav_rule_info {
	struct {
		u8 vm_pool;
		u8 flow_type;
		__be16 vlan_id;
		__be32 dst_ip[4];
		__be32 src_ip[4];
		__be16 src_port;
		__be16 dst_port;
		__be16 flex_bytes;
		__be16 bkt_hash;
	} ntuple;
	__be32 fast_access[11];
};

/* One 32-bit word fed into the sample-mode hash, with the different
 * field layouts it can carry.
 */
union sxe_sample_hash_dword {
	struct {
		u8 vm_pool;
		u8 flow_type;
		__be16 vlan_id;
	} formatted;
	__be32 ip;
	struct {
		__be16 src;
		__be16 dst;
	} port;
	__be16 flex_bytes;
	__be32 dword;
};
+
+void sxe_hw_ops_init(struct sxe_hw *hw);
+
+
+struct sxe_reg_info {
+ u32 addr;
+ u32 count;
+ u32 stride;
+ const s8 *name;
+};
+
/* Device setup/reset ops table: reset sequencing, raw register access,
 * register dump and register self-test hooks.
 */
struct sxe_setup_operations {
	s32 (*reset)(struct sxe_hw *hw);
	void (*pf_rst_done_set)(struct sxe_hw *hw);
	void (*no_snoop_disable)(struct sxe_hw *hw);
	u32 (*reg_read)(struct sxe_hw *hw, u32 reg);
	void (*reg_write)(struct sxe_hw *hw, u32 reg, u32 val);
	void (*regs_dump)(struct sxe_hw *hw);
	void (*regs_flush)(struct sxe_hw *hw);
	s32 (*regs_test)(struct sxe_hw *hw);
};

struct sxe_hw_setup {
	const struct sxe_setup_operations *ops;
};

/* Interrupt ops table: cause read/clear, per-ring and event interrupt
 * mapping, moderation-interval setup, enable/disable and irq self-test.
 */
struct sxe_irq_operations {
	u32 (*pending_irq_read_clear)(struct sxe_hw *hw);
	void (*pending_irq_write_clear)(struct sxe_hw *hw, u32 value);
	void (*irq_general_reg_set)(struct sxe_hw *hw, u32 value);
	u32 (*irq_general_reg_get)(struct sxe_hw *hw);
	void (*ring_irq_auto_disable)(struct sxe_hw *hw, bool is_misx);
	void (*set_eitrsel)(struct sxe_hw *hw, u32 value);
	void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx,
				      u32 interval);
	void (*event_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx,
				       u32 value);
	void (*event_irq_auto_clear_set)(struct sxe_hw *hw, u32 value);
	void (*ring_irq_map)(struct sxe_hw *hw, bool is_tx,
			     u16 reg_idx, u16 irq_idx);
	void (*event_irq_map)(struct sxe_hw *hw, u8 offset, u16 irq_idx);
	void (*ring_irq_enable)(struct sxe_hw *hw, u64 qmask);
	u32 (*irq_cause_get)(struct sxe_hw *hw);
	void (*event_irq_trigger)(struct sxe_hw *hw);
	void (*ring_irq_trigger)(struct sxe_hw *hw, u64 eics);
	void (*specific_irq_disable)(struct sxe_hw *hw, u32 value);
	void (*specific_irq_enable)(struct sxe_hw *hw, u32 value);
	void (*all_irq_disable)(struct sxe_hw *hw);
	void (*spp_configure)(struct sxe_hw *hw, u32 value);
	s32 (*irq_test)(struct sxe_hw *hw, u32 *icr, bool shared);
};

struct sxe_irq_info {
	const struct sxe_irq_operations *ops;
};
+
/* MAC-layer ops table: link state/speed, flow-control configuration,
 * CRC/padding, loopback and frame-size control.
 * NOTE(review): enum sxe_fc_mode is used here but only defined later in
 * this header -- relies on compiler tolerance for incomplete enum use in
 * prototypes; consider moving the enum definition above this struct.
 */
struct sxe_mac_operations {
	bool (*link_up_1g_check)(struct sxe_hw *hw);
	bool (*link_state_is_up)(struct sxe_hw *hw);
	u32 (*link_speed_get)(struct sxe_hw *hw);
	void (*link_speed_set)(struct sxe_hw *hw, u32 speed);
	void (*pad_enable)(struct sxe_hw *hw);
	s32 (*fc_enable)(struct sxe_hw *hw);
	void (*crc_configure)(struct sxe_hw *hw);
	void (*loopback_switch)(struct sxe_hw *hw, bool val);
	void (*txrx_enable)(struct sxe_hw *hw);
	void (*max_frame_set)(struct sxe_hw *hw, u32 val);
	u32 (*max_frame_get)(struct sxe_hw *hw);
	void (*fc_autoneg_localcap_set)(struct sxe_hw *hw);
	void (*fc_tc_high_water_mark_set)(struct sxe_hw *hw, u8 tc_idx, u32 val);
	void (*fc_tc_low_water_mark_set)(struct sxe_hw *hw, u8 tc_idx, u32 val);
	void (*fc_param_init)(struct sxe_hw *hw);
	enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *hw);
	enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *hw);
	void (*fc_requested_mode_set)(struct sxe_hw *hw, enum sxe_fc_mode e);
	bool (*is_fc_autoneg_disabled)(struct sxe_hw *hw);
	void (*fc_autoneg_disable_set)(struct sxe_hw *hw, bool val);
};

/* MAC flag: device needs a double reset after certain events. */
#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01

struct sxe_mac_info {
	const struct sxe_mac_operations *ops;
	u8 flags;
	bool set_lben;
	bool auto_restart;
};
+
/* MAC filtering ops table: RX filter mode, unicast/multicast address
 * tables, ethertype filters, virtualization control and (optionally)
 * wake-on-LAN mode handling.
 */
struct sxe_filter_mac_operations {
	u32 (*rx_mode_get)(struct sxe_hw *hw);
	void (*rx_mode_set)(struct sxe_hw *hw, u32 filter_ctrl);
	u32 (*pool_rx_mode_get)(struct sxe_hw *hw, u16 idx);
	void (*pool_rx_mode_set)(struct sxe_hw *hw, u32 vmolr, u16 idx);
	void (*rx_lro_enable)(struct sxe_hw *hw, bool is_enable);
	void (*rx_udp_frag_checksum_disable)(struct sxe_hw *hw);
	s32 (*uc_addr_add)(struct sxe_hw *hw, u32 rar_idx,
			   u8 *addr, u32 pool_idx);
	s32 (*uc_addr_del)(struct sxe_hw *hw, u32 idx);
	void (*uc_addr_clear)(struct sxe_hw *hw);
	void (*mta_hash_table_set)(struct sxe_hw *hw, u8 index, u32 value);
	void (*mta_hash_table_update)(struct sxe_hw *hw, u8 reg_idx, u8 bit_idx);
	void (*fc_mac_addr_set)(struct sxe_hw *hw, u8 *mac_addr);

	void (*mc_filter_enable)(struct sxe_hw *hw);

	void (*mc_filter_disable)(struct sxe_hw *hw);

	void (*rx_nfs_filter_disable)(struct sxe_hw *hw);
	void (*ethertype_filter_set)(struct sxe_hw *hw, u8 filter_type, u32 val);

	void (*vt_ctrl_configure)(struct sxe_hw *hw, u8 num_vfs);

#ifdef SXE_WOL_CONFIGURE
	void (*wol_mode_set)(struct sxe_hw *hw, u32 wol_status);
	void (*wol_mode_clean)(struct sxe_hw *hw);
	void (*wol_status_set)(struct sxe_hw *hw);
#endif

	void (*vt_disable)(struct sxe_hw *hw);

	s32 (*uc_addr_pool_enable)(struct sxe_hw *hw, u8 rar_idx, u8 pool_idx);
};

struct sxe_filter_mac {
	const struct sxe_filter_mac_operations *ops;
};

/* VLAN filtering ops table: VLVF pool filters, the VLAN filter array
 * (VFTA) and per-VF untagged-packet acceptance.
 */
struct sxe_filter_vlan_operations {
	u32 (*pool_filter_read)(struct sxe_hw *hw, u16 reg_idx);
	void (*pool_filter_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
	u32 (*pool_filter_bitmap_read)(struct sxe_hw *hw, u16 reg_idx);
	void (*pool_filter_bitmap_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
	void (*filter_array_write)(struct sxe_hw *hw, u16 reg_idx, u32 val);
	u32 (*filter_array_read)(struct sxe_hw *hw, u16 reg_idx);
	void (*filter_array_clear)(struct sxe_hw *hw);
	void (*filter_switch)(struct sxe_hw *hw, bool enable);
	void (*untagged_pkts_rcv_switch)(struct sxe_hw *hw, u32 vf, bool accept);
	s32 (*filter_configure)(struct sxe_hw *hw, u32 vid, u32 pool,
				bool vlan_on, bool vlvf_bypass);
};

struct sxe_filter_vlan {
	const struct sxe_filter_vlan_operations *ops;
};

/* Aggregates the MAC and VLAN filter ops tables. */
struct sxe_filter_info {
	struct sxe_filter_mac mac;
	struct sxe_filter_vlan vlan;
};
+
/* Data-buffer-unit ops table: packet-buffer sizing, RSS, flow-director
 * (fnav) rule management, PTP timestamping, LRO and ring enable/disable.
 */
struct sxe_dbu_operations {
	void (*rx_pkt_buf_size_configure)(struct sxe_hw *hw, u8 num_pb,
					  u32 headroom, u16 strategy);
	void (*rx_pkt_buf_switch)(struct sxe_hw *hw, bool is_on);
	void (*rx_multi_ring_configure)(struct sxe_hw *hw, u8 tcs,
					bool is_4q, bool sriov_enable);
	void (*rss_key_set_all)(struct sxe_hw *hw, u32 *rss_key);
	void (*rss_redir_tbl_set_all)(struct sxe_hw *hw, u8 *redir_tbl);
	void (*rx_cap_switch_on)(struct sxe_hw *hw);
	void (*rss_hash_pkt_type_set)(struct sxe_hw *hw, u32 version);
	void (*rss_hash_pkt_type_update)(struct sxe_hw *hw, u32 version);
	void (*rss_rings_used_set)(struct sxe_hw *hw, u32 rss_num,
				   u16 pool, u16 pf_offset);
	void (*lro_ack_switch)(struct sxe_hw *hw, bool is_on);
	void (*vf_rx_switch)(struct sxe_hw *hw, u32 reg_offset,
			     u32 vf_index, bool is_off);

	/* Flow-director rule programming. */
	s32 (*fnav_mode_init)(struct sxe_hw *hw, u32 fnavctrl, u32 fnav_mode);
	s32 (*fnav_specific_rule_mask_set)(struct sxe_hw *hw,
					   union sxe_fnav_rule_info *mask);
	s32 (*fnav_specific_rule_add)(struct sxe_hw *hw,
				      union sxe_fnav_rule_info *input,
				      u16 soft_id, u8 queue);
	s32 (*fnav_specific_rule_del)(struct sxe_hw *hw,
				      union sxe_fnav_rule_info *input, u16 soft_id);
	s32 (*fnav_sample_hash_cmd_get)(struct sxe_hw *hw,
					u8 flow_type, u32 hash_value,
					u8 queue, u64 *hash_cmd);
	void (*fnav_sample_stats_reinit)(struct sxe_hw *hw);
	void (*fnav_sample_hash_set)(struct sxe_hw *hw, u64 hash);
	s32 (*fnav_single_sample_rule_del)(struct sxe_hw *hw, u32 hash);

	/* IEEE 1588 PTP timestamping. */
	void (*ptp_init)(struct sxe_hw *hw);
	void (*ptp_freq_adjust)(struct sxe_hw *hw, u32 adj_freq);
	void (*ptp_systime_init)(struct sxe_hw *hw);
	u64 (*ptp_systime_get)(struct sxe_hw *hw);
	void (*ptp_tx_timestamp_get)(struct sxe_hw *hw, u32 *ts_sec, u32 *ts_ns);
	void (*ptp_timestamp_mode_set)(struct sxe_hw *hw, bool is_l2,
				       u32 tsctl, u32 tses);
	void (*ptp_rx_timestamp_clear)(struct sxe_hw *hw);
	u64 (*ptp_rx_timestamp_get)(struct sxe_hw *hw);
	bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *hw);
	void (*ptp_timestamp_enable)(struct sxe_hw *hw);

	void (*tx_pkt_buf_switch)(struct sxe_hw *hw, bool is_on);

	void (*dcb_tc_rss_configure)(struct sxe_hw *hw, u16 rss_i);

	void (*tx_pkt_buf_size_configure)(struct sxe_hw *hw, u8 num_pb);

	void (*rx_cap_switch_off)(struct sxe_hw *hw);
	u32 (*rx_pkt_buf_size_get)(struct sxe_hw *hw, u8 pb);
	void (*rx_func_switch_on)(struct sxe_hw *hw);

	void (*tx_ring_disable)(struct sxe_hw *hw, u8 reg_idx,
				unsigned long timeout);
	void (*rx_ring_disable)(struct sxe_hw *hw, u8 reg_idx,
				unsigned long timeout);

	u32 (*tx_dbu_fc_status_get)(struct sxe_hw *hw);
};

struct sxe_dbu_info {
	const struct sxe_dbu_operations *ops;
};
+
+
/* DMA ops table: RX/TX ring descriptor setup, ring enable/disable,
 * VLAN tag handling, TPH steering, DCB bandwidth allocation and
 * per-pool (SR-IOV) ring control.
 */
struct sxe_dma_operations {
	void (*rx_dma_ctrl_init)(struct sxe_hw *hw);
	void (*rx_ring_disable)(struct sxe_hw *hw, u8 ring_idx);
	void (*rx_ring_switch)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
	void (*rx_ring_switch_not_polling)(struct sxe_hw *hw, u8 reg_idx,
					   bool is_on);
	void (*rx_ring_desc_configure)(struct sxe_hw *hw, u32 desc_mem_len,
				       u64 desc_dma_addr, u8 reg_idx);
	void (*rx_desc_thresh_set)(struct sxe_hw *hw, u8 reg_idx);
	void (*rx_rcv_ctl_configure)(struct sxe_hw *hw, u8 reg_idx,
				     u32 header_buf_len, u32 pkg_buf_len);
	void (*rx_lro_ctl_configure)(struct sxe_hw *hw, u8 reg_idx, u32 max_desc);
	u32 (*rx_desc_ctrl_get)(struct sxe_hw *hw, u8 reg_idx);
	void (*rx_dma_lro_ctl_set)(struct sxe_hw *hw);
	void (*rx_drop_switch)(struct sxe_hw *hw, u8 idx, bool is_enable);
	void (*rx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);

	void (*tx_enable)(struct sxe_hw *hw);
	void (*tx_multi_ring_configure)(struct sxe_hw *hw, u8 tcs, u16 pool_mask,
					bool sriov_enable, u16 max_txq);
	void (*tx_ring_desc_configure)(struct sxe_hw *hw, u32 desc_mem_len,
				       u64 desc_dma_addr, u8 reg_idx);
	void (*tx_desc_thresh_set)(struct sxe_hw *hw, u8 reg_idx, u32 wb_thresh,
				   u32 host_thresh, u32 prefech_thresh);
	void (*tx_ring_switch)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
	void (*tx_ring_switch_not_polling)(struct sxe_hw *hw, u8 reg_idx, bool is_on);
	void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *hw, u8 num_pb, bool dcb_enable);
	u32 (*tx_desc_ctrl_get)(struct sxe_hw *hw, u8 reg_idx);
	void (*tx_ring_info_get)(struct sxe_hw *hw, u8 idx, u32 *head, u32 *tail);
	void (*tx_desc_wb_thresh_clear)(struct sxe_hw *hw, u8 reg_idx);

	void (*vlan_tag_strip_switch)(struct sxe_hw *hw, u16 reg_index, bool is_enable);
	void (*tx_vlan_tag_set)(struct sxe_hw *hw, u16 vid, u16 qos, u32 vf);
	void (*tx_vlan_tag_clear)(struct sxe_hw *hw, u32 vf);
	void (*tx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu);

	void (*tph_switch)(struct sxe_hw *hw, bool is_enable);

	/* DCB bandwidth-group allocation and PFC configuration. */
	void (*dcb_rx_bw_alloc_configure)(struct sxe_hw *hw,
					  u16 *refill,
					  u16 *max,
					  u8 *bwg_id,
					  u8 *prio_type,
					  u8 *prio_tc,
					  u8 max_priority);
	void (*dcb_tx_desc_bw_alloc_configure)(struct sxe_hw *hw,
					       u16 *refill,
					       u16 *max,
					       u8 *bwg_id,
					       u8 *prio_type);
	void (*dcb_tx_data_bw_alloc_configure)(struct sxe_hw *hw,
					       u16 *refill,
					       u16 *max,
					       u8 *bwg_id,
					       u8 *prio_type,
					       u8 *prio_tc,
					       u8 max_priority);
	void (*dcb_pfc_configure)(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc,
				  u8 max_priority);
	void (*dcb_tc_stats_configure)(struct sxe_hw *hw);
	void (*dcb_rx_up_tc_map_set)(struct sxe_hw *hw, u8 tc);
	void (*dcb_rx_up_tc_map_get)(struct sxe_hw *hw, u8 *map);
	void (*dcb_rate_limiter_clear)(struct sxe_hw *hw, u8 ring_max);

	/* Virtualization (SR-IOV / VMDq) pool control. */
	void (*vt_pool_loopback_switch)(struct sxe_hw *hw, bool is_enable);
	u32 (*rx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
	u32 (*tx_pool_get)(struct sxe_hw *hw, u8 reg_idx);
	void (*tx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);
	void (*rx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap);

	void (*vf_tx_desc_addr_clear)(struct sxe_hw *hw, u8 vf_idx, u8 ring_per_pool);
	void (*pool_mac_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
	void (*pool_vlan_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status);
	void (*spoof_count_enable)(struct sxe_hw *hw, u8 reg_idx, u8 bit_index);
	void (*pool_rx_ring_drop_enable)(struct sxe_hw *hw, u8 vf_idx,
					 u16 pf_vlan, u8 ring_per_pool);

	void (*max_dcb_memory_window_set)(struct sxe_hw *hw, u32 value);
	void (*dcb_tx_ring_rate_factor_set)(struct sxe_hw *hw, u32 ring_idx, u32 rate);

	void (*vf_tx_ring_disable)(struct sxe_hw *hw, u8 ring_per_pool, u8 vf_idx);
	void (*all_ring_disable)(struct sxe_hw *hw, u32 ring_max);
	void (*tx_ring_tail_init)(struct sxe_hw *hw, u8 reg_idx);
};

struct sxe_dma_info {
	const struct sxe_dma_operations *ops;
};
+
/* Security (IPsec offload) ops table: SA key/SPI/IP programming and
 * security-engine start/stop around link transitions.
 */
struct sxe_sec_operations {
	void (*ipsec_rx_ip_store)(struct sxe_hw *hw, __be32 *ip_addr, u8 ip_len, u8 ip_idx);
	void (*ipsec_rx_spi_store)(struct sxe_hw *hw, __be32 spi, u8 ip_idx, u16 idx);
	void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len,
				   u32 salt, u32 mode, u16 idx);
	void (*ipsec_tx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len, u32 salt, u16 idx);
	void (*ipsec_sec_data_stop)(struct sxe_hw *hw, bool is_linkup);
	void (*ipsec_engine_start)(struct sxe_hw *hw, bool is_linkup);
	void (*ipsec_engine_stop)(struct sxe_hw *hw, bool is_linkup);
	bool (*ipsec_offload_is_disable)(struct sxe_hw *hw);
	void (*ipsec_sa_disable)(struct sxe_hw *hw);
};

struct sxe_sec_info {
	const struct sxe_sec_operations *ops;
};

/* Statistics ops table: clear/read the MAC counters and dump stats regs. */
struct sxe_stat_operations {
	void (*stats_clear)(struct sxe_hw *hw);
	void (*stats_get)(struct sxe_hw *hw, struct sxe_mac_stats *st);

	u32 (*tx_packets_num_get)(struct sxe_hw *hw);
	u32 (*unsecurity_packets_num_get)(struct sxe_hw *hw);
	u32 (*mac_stats_dump)(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size);
	u32 (*tx_dbu_to_mac_stats)(struct sxe_hw *hw);
};

struct sxe_stat_info {
	const struct sxe_stat_operations *ops;
};
+
/* PF<->VF mailbox ops table: message send/receive plus request/ack/reset
 * flag polling per VF.
 */
struct sxe_mbx_operations {
	void (*init)(struct sxe_hw *hw);

	s32 (*msg_send)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);
	s32 (*msg_rcv)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index);

	bool (*req_check)(struct sxe_hw *hw, u8 vf_idx);
	bool (*ack_check)(struct sxe_hw *hw, u8 vf_idx);
	bool (*rst_check)(struct sxe_hw *hw, u8 vf_idx);

	void (*mbx_mem_clear)(struct sxe_hw *hw, u8 vf_idx);
};

/* Mailbox traffic counters. */
struct sxe_mbx_stats {
	u32 send_msgs;
	u32 rcv_msgs;

	u32 reqs;
	u32 acks;
	u32 rsts;
};

/* Mailbox state: ops, counters, retry policy and message length. */
struct sxe_mbx_info {
	const struct sxe_mbx_operations *ops;
	struct sxe_mbx_stats stats;
	u32 retry;
	u32 interval;
	u32 msg_len;
};

/* PCIe ops table: virtualization mode register programming. */
struct sxe_pcie_operations {
	void (*vt_mode_set)(struct sxe_hw *hw, u32 value);
};

struct sxe_pcie_info {
	const struct sxe_pcie_operations *ops;
};
+
/* Bit positions for the hw->state bitmap (see sxe_is_hw_fault()). */
enum sxe_hw_state {
	SXE_HW_STOP,
	SXE_HW_FAULT,
};

/* 802.3x flow-control modes. */
enum sxe_fc_mode {
	SXE_FC_NONE = 0,
	SXE_FC_RX_PAUSE,
	SXE_FC_TX_PAUSE,
	SXE_FC_FULL,
	SXE_FC_DEFAULT,
};

/* Flow-control configuration: per-TC watermarks, pause timing and the
 * requested vs. currently-negotiated mode.
 */
struct sxe_fc_info {
	u32 high_water[MAX_TRAFFIC_CLASS];
	u32 low_water[MAX_TRAFFIC_CLASS];
	u16 pause_time;
	bool strict_ieee;
	bool disable_fc_autoneg;
	enum sxe_fc_mode current_mode;
	enum sxe_fc_mode requested_mode;
	u16 send_xon;
};

/* Local/link-partner symmetric and asymmetric pause advertisement bits
 * used during flow-control autonegotiation.
 */
struct sxe_fc_nego_mode {
	u32 adv_sym;
	u32 adv_asm;
	u32 lp_sym;
	u32 lp_asm;

};
+
/* Host-driver-communication (firmware channel) ops table: PF lock,
 * packet-based command send/receive and firmware status/IRQ handling.
 */
struct sxe_hdc_operations {
	s32 (*pf_lock_get)(struct sxe_hw *hw, u32 trylock);
	void (*pf_lock_release)(struct sxe_hw *hw, u32 retry_cnt);
	bool (*is_fw_over_set)(struct sxe_hw *hw);
	u32 (*fw_ack_header_rcv)(struct sxe_hw *hw);
	void (*packet_send_done)(struct sxe_hw *hw);
	void (*packet_header_send)(struct sxe_hw *hw, u32 value);
	void (*packet_data_dword_send)(struct sxe_hw *hw,
				       u16 dword_index, u32 value);
	u32 (*packet_data_dword_rcv)(struct sxe_hw *hw, u16 dword_index);
	u32 (*fw_status_get)(struct sxe_hw *hw);
	void (*drv_status_set)(struct sxe_hw *hw, u32 value);
	u32 (*irq_event_get)(struct sxe_hw *hw);
	void (*irq_event_clear)(struct sxe_hw *hw, u32 event);
	void (*fw_ov_clear)(struct sxe_hw *hw);
	u32 (*channel_state_get)(struct sxe_hw *hw);
	void (*resource_clean)(struct sxe_hw *hw);
};

struct sxe_hdc_info {
	u32 pf_lock_val;
	const struct sxe_hdc_operations *ops;
};

/* PHY register-access ops table (MDIO-style: prtad = port address). */
struct sxe_phy_operations {
	s32 (*reg_write)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
			 u32 device_type, u16 phy_data);
	s32 (*reg_read)(struct sxe_hw *hw, s32 prtad, u32 reg_addr,
			u32 device_type, u16 *phy_data);
	s32 (*identifier_get)(struct sxe_hw *hw, u32 prtad, u32 *id);
	s32 (*link_cap_get)(struct sxe_hw *hw, u32 prtad, u32 *speed);
	s32 (*reset)(struct sxe_hw *hw, u32 prtad);
};

struct sxe_phy_reg_info {
	const struct sxe_phy_operations *ops;
};
+
/* Top-level hardware abstraction object. Holds the mapped register base,
 * the fault state bitmap, the raw register accessors installed via
 * sxe_hw_reg_handle_init()/sxe_hw_fault_handle_init(), and one ops-table
 * wrapper per functional unit.
 */
struct sxe_hw {
	u8 __iomem *reg_base_addr;	/* BAR mapping for register access */

	void *adapter;			/* back-pointer to owning adapter */
	void *priv;			/* context passed to fault_handle */
	unsigned long state;		/* bitmap of enum sxe_hw_state bits */
	void (*fault_handle)(void *priv);
	u32 (*reg_read)(const volatile void *reg);
	void (*reg_write)(u32 value, volatile void *reg);

	struct sxe_hw_setup setup;
	struct sxe_irq_info irq;
	struct sxe_mac_info mac;
	struct sxe_filter_info filter;
	struct sxe_dbu_info dbu;
	struct sxe_dma_info dma;
	struct sxe_sec_info sec;
	struct sxe_stat_info stat;
	struct sxe_fc_info fc;

	struct sxe_mbx_info mbx;
	struct sxe_pcie_info pcie;
	struct sxe_hdc_info hdc;
	struct sxe_phy_reg_info phy;
};
+
/* Common helpers shared by the kernel and DPDK builds. */
u16 sxe_mac_reg_num_get(void);

void sxe_hw_fault_handle(struct sxe_hw *hw);

bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw);

/* Populates every ops table inside *hw with the default implementations. */
void sxe_hw_ops_init(struct sxe_hw *hw);

u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx);

bool sxe_hw_is_rss_enabled(struct sxe_hw *hw);

u32 sxe_hw_rss_field_get(struct sxe_hw *hw);
+
+static inline bool sxe_is_hw_fault(struct sxe_hw *hw)
+{
+ return test_bit(SXE_HW_FAULT, &hw->state);
+}
+
+static inline void sxe_hw_fault_handle_init(struct sxe_hw *hw,
+ void (*handle)(void *), void *priv)
+{
+ hw->priv = priv;
+ hw->fault_handle = handle;
+
+}
+
+static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw,
+ u32 (*read)(const volatile void *),
+ void (*write)(u32, volatile void *))
+{
+ hw->reg_read = read;
+ hw->reg_write = write;
+
+}
+
/* DPDK-only wrapper API around the ops tables above. */
#ifdef SXE_DPDK

void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc);

void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats);

void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value);

s32 sxe_hw_nic_reset(struct sxe_hw *hw);

/* Flow-control configuration accessors. */
u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw);

void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time);

void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw);

u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx);

u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx);

u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw);

void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon);

u32 sxe_hw_rx_mode_get(struct sxe_hw *hw);

void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl);

/* Interrupt control. */
void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value);

void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value);

void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value);

u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw);

void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx);

void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx,
			 u16 reg_idx, u16 irq_idx);

void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw,
				  u16 irq_idx, u32 interval);

void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value);

void sxe_hw_all_irq_disable(struct sxe_hw *hw);

void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw,
				  bool is_msix);

u32 sxe_hw_irq_cause_get(struct sxe_hw *hw);

void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value);

u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx);

void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value);

/* MAC/VLAN filtering. */
s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx,
		       u8 *addr, u32 pool_idx);

s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index);

u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx);

void sxe_hw_uta_hash_table_set(struct sxe_hw *hw,
			       u8 reg_idx, u32 value);

void sxe_hw_mta_hash_table_set(struct sxe_hw *hw,
			       u8 index, u32 value);

void sxe_hw_mc_filter_enable(struct sxe_hw *hw);

void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw,
				    u16 reg_index, u32 value);

u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index);

void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable);

u32 sxe_hw_vlan_type_get(struct sxe_hw *hw);

void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value);

void sxe_hw_vlan_ext_vet_write(struct sxe_hw *hw, u32 value);

void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw,
				  u16 reg_index, bool is_enable);

void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value);

u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw);

u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw);

void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value);

void sxe_hw_pf_rst_done_set(struct sxe_hw *hw);

/* Register dump helpers. */
u32 sxe_hw_all_regs_group_num_get(void);

void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data);

s32 sxe_hw_fc_enable(struct sxe_hw *hw);

bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw);

void sxe_hw_fc_status_get(struct sxe_hw *hw,
			  bool *rx_pause_on, bool *tx_pause_on);

void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw,
				  enum sxe_fc_mode mode);

void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw,
				      u8 tc_idx, u32 mark);

void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw,
				     u8 tc_idx, u32 mark);

void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw,
				   bool is_disabled);

u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb);

/* PTP timestamping. */
void sxe_hw_ptp_init(struct sxe_hw *hw);

void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw,
				   bool is_l2, u32 tsctl, u32 tses);

void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw);

void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw);

void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw);

void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw);

bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw);

u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw);

void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw,
				 u32 *ts_sec, u32 *ts_ns);

u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw);

/* RSS configuration. */
void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on);

void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);

void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field);

void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl);

u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx);

void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw,
				     u16 reg_idx, u32 value);

/* RX path configuration. */
void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw);

void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame);

void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw);

void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw,
					  bool is_on);

void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);

void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);

void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw,
				   u32 desc_mem_len, u64 desc_dma_addr,
				   u8 reg_idx);

void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx,
				 u32 header_buf_len, u32 pkg_buf_len
				 );

void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable);

void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx);

void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on);

void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw);

void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw);

void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable);

void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw,
				 u8 reg_idx, u32 max_desc);
void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable);

void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw);

/* TX path configuration. */
void sxe_hw_tx_ring_info_get(struct sxe_hw *hw,
			     u8 idx, u32 *head, u32 *tail);

void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on);

void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on);

void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw,
					u8 reg_idx, u32 rdh_value,
					u32 rdt_value);

/* Host-driver-communication (firmware channel). */
u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw);

s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock);

void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt);

bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw);

void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw);

u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw);

void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw);

void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value);

void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw,
				       u16 dword_index, u32 value);

u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw,
				     u16 dword_index);

u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw);

u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw);

void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max);

void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx);

void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx);

void sxe_hw_tx_enable(struct sxe_hw *hw);

void sxe_hw_tx_desc_thresh_set(
		struct sxe_hw *hw,
		u8 reg_idx,
		u32 wb_thresh,
		u32 host_thresh,
		u32 prefech_thresh);

void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on);

void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb);

void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw,
					u8 num_pb, bool dcb_enable);

void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw,
				   u32 desc_mem_len,
				   u64 desc_dma_addr, u8 reg_idx);

void sxe_hw_mac_txrx_enable(struct sxe_hw *hw);

void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw);

void sxe_hw_mac_pad_enable(struct sxe_hw *hw);

/* Link state. */
bool sxe_hw_is_link_state_up(struct sxe_hw *hw);

u32 sxe_hw_link_speed_get(struct sxe_hw *hw);

void sxe_hw_fc_base_init(struct sxe_hw *hw);

/* Statistics. */
void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats);

void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);

void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value);

void sxe_hw_uc_addr_clear(struct sxe_hw *hw);

void sxe_hw_vt_disable(struct sxe_hw *hw);

void sxe_hw_stats_regs_clean(struct sxe_hw *hw);

void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value);

void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed);

void sxe_hw_crc_configure(struct sxe_hw *hw);

void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw);

void sxe_hw_no_snoop_disable(struct sxe_hw *hw);

void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max);

/* DCB / VMDq configuration. */
s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx);

void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools);

void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw,
					    u8 default_pool_enabled,
					    u8 default_pool_idx);

void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw,
				       u8 *tc_arr);

void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw,
				    u8 num_pools);

void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw,
				    u8 pool_idx, u16 vlan_id,
				    u64 pools_map);

void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on,
			     u8 sriov_active, u8 pg_tcs);

void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 pg_tcs);

void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num);

void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize);

void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw,
				   u8 tc_count, bool vmdq_active);

void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw,
				      u16 *refill,
				      u16 *max,
				      u8 *bwg_id,
				      u8 *prio_type,
				      u8 *prio_tc,
				      u8 max_priority);

void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type);

void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type,
					   u8 *prio_tc,
					   u8 max_priority);

void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw,
			      u8 pfc_en, u8 *prio_tc,
			      u8 max_priority);

void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw);

void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw,
					u8 default_pool_enabled,
					u8 default_pool_idx);

void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw,
				u8 num_pools, u32 rx_mode);

void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw,
				u8 pool_idx, u16 vlan_id,
				u64 pools_map);

void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw);

void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw,
				     bool vmdq_enable, bool sriov_enable, u16 pools_num);

void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value);

void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw,
					u32 ring_idx, u32 rate);

/* Virtualization (SR-IOV) support. */
void sxe_hw_mbx_init(struct sxe_hw *hw);

void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 num_vfs);

void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw,
			       u8 reg_idx, u32 bitmap);

void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw,
			       u8 reg_idx, u32 bitmap);

void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw,
				    bool is_enable);

void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx);

s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw,
			       u8 rar_idx, u8 pool_idx);

void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value);

u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw);

void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw,
				    u8 vf_idx, bool status);

void sxe_rx_fc_threshold_set(struct sxe_hw *hw);
+
+void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw,
+ u8 tcs, bool is_4Q,
+ bool sriov_enable);
+
+void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc);
+
+bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx);
+
+bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx);
+
+s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg,
+ u16 msg_len, u16 index);
+
+s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg,
+ u16 msg_len, u16 index);
+
+void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx);
+
+u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx);
+
+void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw,
+ u32 vmolr, u16 pool_idx);
+
+void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf);
+
+u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx);
+
+void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+ u16 pf_vlan, u8 ring_per_pool);
+
+void sxe_hw_spoof_count_enable(struct sxe_hw *hw,
+ u8 reg_idx, u8 bit_index);
+
+u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf);
+
+bool sxe_hw_vt_status(struct sxe_hw *hw);
+
+s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass);
+
+u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx, u32 lsb, u32 msb);
+
+void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id,
+ u8 mirror_type, u8 dst_pool, bool on);
+
+void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id);
+
+u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw);
+
+void sxe_hw_mta_hash_table_update(struct sxe_hw *hw,
+ u8 reg_idx, u8 bit_idx);
+
+void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx,
+ u8 ring_per_pool);
+void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr);
+
+void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode,
+ u32 rx_mode, u32 pn_trh);
+
+void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up);
+
+void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch);
+
+void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi);
+
+void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+ u8 an, u32 pn, u32 *keys);
+
+void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx,
+			u8 an, u32 pn, u32 *keys);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize);
+
+void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask);
+
+void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask);
+
+s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset);
+
+void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev,
+ struct sxe_fivetuple_node_info *filter);
+
+void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index);
+
+void sxe_hw_ethertype_filter_add(struct sxe_hw *hw,
+ u8 reg_index, u16 ethertype, u16 queue);
+
+void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type);
+
+void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority);
+
+void sxe_hw_syn_filter_del(struct sxe_hw *hw);
+
+void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key);
+#endif
+
+void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl);
+
+s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw);
+
+s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw,
+ union sxe_fnav_rule_info *input,
+ u16 soft_id, u8 queue);
+
+s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw,
+ union sxe_fnav_rule_info *input,
+ u16 soft_id);
+
+void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw,
+ u8 flow_type, u32 hash_value, u8 queue);
+
+void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw,
+ u16 reg_idx, u32 value);
+
+u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask);
+
+s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw,
+ union sxe_fnav_rule_info *input_mask);
+
+s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw,
+ u32 vid, u32 pool,
+ bool vlan_on, bool vlvf_bypass);
+
+void sxe_hw_ptp_systime_init(struct sxe_hw *hw);
+
+#endif
+#endif
new file mode 100644
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef _SXE_LOGS_H_
+#define _SXE_LOGS_H_
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+#include "sxe_types.h"
+
+#define LOG_FILE_NAME_LEN 256
+#define LOG_FILE_PATH "/var/log/"
+#define LOG_FILE_PREFIX "sxepmd.log"
+
+extern s32 sxe_log_init;
+extern s32 sxe_log_rx;
+extern s32 sxe_log_tx;
+extern s32 sxe_log_drv;
+extern s32 sxe_log_hw;
+
+#define INIT sxe_log_init
+#define RX sxe_log_rx
+#define TX sxe_log_tx
+#define HW sxe_log_hw
+#define DRV sxe_log_drv
+
+#define UNUSED(x) (void)(x)
+
+/* Format the current wall-clock time into log_time (a char array).
+ * Use localtime_r instead of localtime: the log macros may run on
+ * several lcores concurrently and localtime returns a pointer to a
+ * shared static buffer, which is a data race.
+ */
+#define TIME(log_time) \
+	do { \
+		struct timeval tv; \
+		struct tm td; \
+		gettimeofday(&tv, NULL); \
+		localtime_r(&tv.tv_sec, &td); \
+		strftime(log_time, sizeof(log_time), "%Y-%m-%d-%H:%M:%S", &td); \
+	} while (0)
+
+#define filename_printf(x) (strrchr((x), '/')?strrchr((x), '/')+1:(x))
+
+#ifdef SXE_DPDK_DEBUG
+/* Common body for every PMD_LOG_* level in debug builds: prefix each
+ * message with level string, timestamp, thread id and source location.
+ * pthread_self() returns an opaque pthread_t; print it through an
+ * explicit (unsigned long) cast with %lu instead of feeding it to a
+ * %ld specifier, which is undefined behavior on platforms where
+ * pthread_t is not a long.
+ */
+#define PMD_LOG_IMPL(level, level_str, logtype, fmt, ...) \
+	do { \
+		s8 log_time[40]; \
+		TIME(log_time); \
+		rte_log(RTE_LOG_##level, logtype, \
+			"[%s][%s][%lu]%s:%d:%s: " fmt "\n", \
+			level_str, log_time, \
+			(unsigned long)pthread_self(), \
+			filename_printf(__FILE__), __LINE__, \
+			__func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+	PMD_LOG_IMPL(DEBUG, "DEBUG", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+	PMD_LOG_IMPL(INFO, "INFO", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+	PMD_LOG_IMPL(NOTICE, "NOTICE", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+	PMD_LOG_IMPL(WARNING, "WARN", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+	PMD_LOG_IMPL(ERR, "ERR", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+	PMD_LOG_IMPL(CRIT, "CRIT", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+	PMD_LOG_IMPL(ALERT, "ALERT", logtype, fmt, ##__VA_ARGS__)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+	PMD_LOG_IMPL(EMERG, "EMERG", logtype, fmt, ##__VA_ARGS__)
+
+#else
+#define PMD_LOG_DEBUG(logtype, fmt, ...) \
+ rte_log(RTE_LOG_DEBUG, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_INFO(logtype, fmt, ...) \
+ rte_log(RTE_LOG_INFO, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_NOTICE(logtype, fmt, ...) \
+ rte_log(RTE_LOG_NOTICE, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_WARN(logtype, fmt, ...) \
+ rte_log(RTE_LOG_WARNING, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_ERR(logtype, fmt, ...) \
+ rte_log(RTE_LOG_ERR, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_CRIT(logtype, fmt, ...) \
+ rte_log(RTE_LOG_CRIT, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_ALERT(logtype, fmt, ...) \
+ rte_log(RTE_LOG_ALERT, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#define PMD_LOG_EMERG(logtype, fmt, ...) \
+ rte_log(RTE_LOG_EMERG, logtype, "%s(): " \
+ fmt "\n", __func__, ##__VA_ARGS__)
+
+#endif
+
+#define PMD_INIT_FUNC_TRACE() PMD_LOG_DEBUG(INIT, " >>")
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEBUG(fmt, ...) \
+ PMD_LOG_DEBUG(DRV, fmt, ##__VA_ARGS__)
+
+#define LOG_INFO(fmt, ...) \
+ PMD_LOG_INFO(DRV, fmt, ##__VA_ARGS__)
+
+#define LOG_WARN(fmt, ...) \
+ PMD_LOG_WARN(DRV, fmt, ##__VA_ARGS__)
+
+#define LOG_ERROR(fmt, ...) \
+ PMD_LOG_ERR(DRV, fmt, ##__VA_ARGS__)
+
+#define LOG_DEBUG_BDF(fmt, ...) \
+ PMD_LOG_DEBUG(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
+
+#define LOG_INFO_BDF(fmt, ...) \
+ PMD_LOG_INFO(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
+
+#define LOG_WARN_BDF(fmt, ...) \
+ PMD_LOG_WARN(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
+
+#define LOG_ERROR_BDF(fmt, ...) \
+ PMD_LOG_ERR(HW, "[%s]" fmt, adapter->name, ##__VA_ARGS__)
+
+#else
+#define LOG_DEBUG(fmt, ...)
+#define LOG_INFO(fmt, ...)
+#define LOG_WARN(fmt, ...)
+#define LOG_ERROR(fmt, ...)
+#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter)
+#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter)
+#endif
+
+#ifdef SXE_DPDK_DEBUG
+#define LOG_DEV_DEBUG(fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_DEV_INFO(fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_DEV_WARN(fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_DEV_ERR(fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_MSG_INFO(msglvl, fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_MSG_WARN(msglvl, fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define LOG_MSG_ERR(msglvl, fmt, ...) \
+ do { \
+ UNUSED(adapter); \
+ LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#else
+#define LOG_DEV_DEBUG(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_INFO(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_WARN(fmt, ...) UNUSED(adapter)
+#define LOG_DEV_ERR(fmt, ...) UNUSED(adapter)
+#define LOG_MSG_DEBUG(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_INFO(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_WARN(msglvl, fmt, ...) UNUSED(adapter)
+#define LOG_MSG_ERR(msglvl, fmt, ...) UNUSED(adapter)
+#endif
+
+void sxe_log_stream_init(void);
+
+#endif
new file mode 100644
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_offload_common.h"
+#include "sxe_compat_version.h"
+
+/* Per-queue Rx offload capabilities: only VLAN stripping can be
+ * toggled on an individual queue; everything else is port-wide.
+ */
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+}
+
+/* Port-wide Rx offload capabilities of the sxe hardware. */
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	u64 capa = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+#ifdef DEV_RX_JUMBO_FRAME
+			DEV_RX_OFFLOAD_JUMBO_FRAME |
+#endif
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+			RTE_ETH_RX_OFFLOAD_SCATTER |
+			RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	/* LRO is only offered while SR-IOV is inactive. */
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0)
+		capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+
+	return capa;
+}
+
+/* Port-wide Tx offload capabilities; identical for every port. */
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
+}
+
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_OFFLOAD_COMMON_H__
+#define __SXE_OFFLOAD_COMMON_H__
+
+u64 __sxe_rx_queue_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_rx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+u64 __sxe_tx_port_offload_capa_get(struct rte_eth_dev *dev);
+
+#endif
+
new file mode 100644
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_logs.h"
+#include "sxe_regs.h"
+#include "sxevf_regs.h"
+#include "sxe.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include <rte_vect.h>
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+#include "sxevf.h"
+#endif
+#include "sxe_queue_common.h"
+#include "sxe_queue.h"
+
+/* Release the mbufs held by every Tx queue and reset each ring to its
+ * initial state via the queue ops.
+ */
+static void sxe_tx_queues_clear(struct rte_eth_dev *dev)
+{
+	u16 idx;
+
+	for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+		struct sxe_tx_queue *queue = dev->data->tx_queues[idx];
+
+		if (queue == NULL || queue->ops == NULL)
+			continue;
+
+		queue->ops->mbufs_release(queue);
+		queue->ops->init(queue);
+	}
+}
+
+/* Free the mbufs held by every Rx queue and restore each ring to its
+ * post-setup state.
+ */
+static void sxe_rx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	u16 idx;
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		struct sxe_rx_queue *queue = dev->data->rx_queues[idx];
+
+		if (queue == NULL)
+			continue;
+
+		sxe_rx_queue_mbufs_free(queue);
+		sxe_rx_queue_init(rx_batch_alloc_allowed, queue);
+	}
+}
+
+/* Allocate and initialize one Rx queue for a PF or VF port.
+ *
+ * rx_setup: aggregated arguments (dev, queue index, ring depth,
+ *           socket, rxconf, mempool, register base, batch-alloc flag).
+ * is_vf:    selects the VF vs PF tail-register layout.
+ *
+ * Returns 0 on success, -EINVAL on a bad descriptor count,
+ * -ENOMEM when any allocation fails (partial allocations are freed).
+ */
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf)
+{
+	struct rte_eth_dev *dev = rx_setup->dev;
+	const struct rte_eth_rxconf *rx_conf = rx_setup->rx_conf;
+	u16 queue_idx = rx_setup->queue_idx;
+	u32 socket_id = rx_setup->socket_id;
+	u16 desc_num = rx_setup->desc_num;
+	struct rte_mempool *mp = rx_setup->mp;
+	const struct rte_memzone *rx_mz;
+	struct sxe_rx_queue *rxq;
+	u16 len;
+	u64 offloads;
+	s32 ret = 0;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	/* Same private area, interpreted per PF/VF below. */
+	struct sxe_adapter *pf_adapter = dev->data->dev_private;
+	struct sxevf_adapter *vf_adapter = dev->data->dev_private;
+#endif
+
+	PMD_INIT_FUNC_TRACE();
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Ring depth must be aligned and inside the hardware limits. */
+	if (desc_num % SXE_RX_DESC_RING_ALIGN != 0 ||
+	    (desc_num > SXE_MAX_RING_DESC) ||
+	    (desc_num < SXE_MIN_RING_DESC)) {
+		PMD_LOG_ERR(INIT, "desc_num %u error", desc_num);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Re-setup of an existing queue: release the old one first. */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		sxe_rx_queue_free(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct sxe_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc mem failed");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mb_pool = mp;
+	rxq->ring_depth = desc_num;
+	rxq->batch_alloc_size = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	/* With SR-IOV active, PF queues live behind the default pool. */
+	rxq->reg_idx = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	rxq->port_id = dev->data->port_id;
+	/* Keeping the CRC means received lengths include 4 CRC bytes. */
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+		rxq->crc_len = RTE_ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
+
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;
+
+	rxq->pkt_type_mask = SXE_PACKET_TYPE_MASK;
+
+	/* DMA-able memory for the descriptor ring. */
+	rx_mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+					 SXE_RX_RING_SIZE, SXE_ALIGN, socket_id);
+	if (rx_mz == NULL) {
+		PMD_LOG_ERR(INIT, "rxq malloc desc mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	rxq->mz = rx_mz;
+
+	memset(rx_mz->addr, 0, SXE_RX_RING_SIZE);
+
+	/* Tail-pointer register address differs between VF and PF BARs. */
+	if (is_vf)
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+				SXE_VFRDT(rxq->reg_idx));
+	else
+		rxq->rdt_reg_addr = (volatile u32 *)(rx_setup->reg_base_addr +
+				SXE_RDT(rxq->reg_idx));
+
+	rxq->base_addr = rx_mz->iova;
+
+	rxq->desc_ring = (union sxe_rx_data_desc *)rx_mz->addr;
+
+	/* Bulk allocation is all-or-nothing for the whole port. */
+	if (!sxe_check_is_rx_batch_alloc_support(rxq)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't support rx batch alloc "
+			      "- canceling the feature for the whole port[%d]",
+			      rxq->queue_id, rxq->port_id);
+		*rx_setup->rx_batch_alloc_allowed = false;
+	}
+
+	/* Batch mode over-allocates so a burst can run past the ring end. */
+	len = desc_num;
+	if (*rx_setup->rx_batch_alloc_allowed)
+		len += RTE_PMD_SXE_MAX_RX_BURST;
+
+	rxq->buffer_ring = rte_zmalloc_socket("rxq->sw_ring",
+					      sizeof(struct sxe_rx_buffer) * len,
+					      RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	/* Second software ring tracks scattered (multi-segment) packets. */
+	rxq->sc_buffer_ring =
+		rte_zmalloc_socket("rxq->sw_sc_ring",
+				   sizeof(struct sxe_rx_buffer) * len,
+				   RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sc_buffer_ring) {
+		PMD_LOG_ERR(INIT, "rxq malloc sc buffer mem failed");
+		sxe_rx_queue_free(rxq);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	PMD_LOG_DEBUG(INIT, "buffer_ring=%p sc_buffer_ring=%p desc_ring=%p "
+		      "dma_addr=0x%"SXE_PRIX64,
+		      rxq->buffer_ring, rxq->sc_buffer_ring, rxq->desc_ring,
+		      rxq->base_addr);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	/* Vector Rx needs a power-of-two ring; one bad queue disables it
+	 * for the whole port.
+	 */
+	if (!rte_is_power_of_2(desc_num)) {
+		PMD_LOG_DEBUG(INIT, "queue[%d] doesn't meet Vector Rx "
+			      "preconditions - canceling the feature for "
+			      "the whole port[%d]",
+			      rxq->queue_id, rxq->port_id);
+		if (is_vf)
+			vf_adapter->rx_vec_allowed = false;
+		else
+			pf_adapter->rx_vec_allowed = false;
+
+	} else {
+		sxe_rxq_vec_setup(rxq);
+	}
+#endif
+
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	sxe_rx_queue_init(*rx_setup->rx_batch_alloc_allowed, rxq);
+
+l_end:
+	return ret;
+}
+
+/* Allocate and initialize one Tx queue for a PF or VF port.
+ *
+ * tx_setup: aggregated arguments (dev, queue index, ring depth,
+ *           socket, txconf, register base).
+ * is_vf:    selects the VF vs PF tail-register layout.
+ *
+ * Returns 0 on success, a negative errno on argument or allocation
+ * failure.
+ */
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf)
+{
+	s32 ret;
+	struct rte_eth_dev *dev = tx_setup->dev;
+	const struct rte_eth_txconf *tx_conf = tx_setup->tx_conf;
+	u16 tx_queue_id = tx_setup->queue_idx;
+	u32 socket_id = tx_setup->socket_id;
+	u16 ring_depth = tx_setup->desc_num;
+	struct sxe_tx_queue *txq;
+	u16 rs_thresh, free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = sxe_txq_arg_validate(dev, ring_depth, &rs_thresh,
+				&free_thresh, tx_conf);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] arg validate failed", tx_queue_id);
+		goto l_end;
+	}
+
+	PMD_LOG_INFO(INIT, "tx queue[%d] ring_depth=%d, "
+			"rs_thresh=%d, free_thresh=%d", tx_queue_id,
+			ring_depth, rs_thresh, free_thresh);
+
+	txq = sxe_tx_queue_alloc(dev, tx_queue_id, ring_depth, socket_id);
+	if (!txq) {
+		PMD_LOG_ERR(INIT, "tx queue[%d] resource alloc failed", tx_queue_id);
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	txq->ops = sxe_tx_default_ops_get();
+	txq->ring_depth = ring_depth;
+	txq->queue_idx = tx_queue_id;
+	txq->port_id = dev->data->port_id;
+	txq->pthresh = tx_conf->tx_thresh.pthresh;
+	txq->hthresh = tx_conf->tx_thresh.hthresh;
+	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->rs_thresh = rs_thresh;
+	txq->free_thresh = free_thresh;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	/* With SR-IOV active, PF queues live behind the default pool. */
+	txq->reg_idx = (u16)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		tx_queue_id : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + tx_queue_id);
+	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+	/* Tail pointer is a device register: keep the volatile qualifier
+	 * on both branches (the PF branch previously dropped it,
+	 * inconsistent with the VF branch and the field's type).
+	 */
+	if (is_vf)
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+						SXE_VFTDT(txq->reg_idx));
+	else
+		txq->tdt_reg_addr = (volatile u32 *)(tx_setup->reg_base_addr +
+						SXE_TDT(txq->reg_idx));
+
+	PMD_LOG_INFO(INIT, "buffer_ring=%p desc_ring=%p dma_addr=0x%"PRIx64,
+			txq->buffer_ring, txq->desc_ring, (u64)txq->base_addr);
+	sxe_tx_function_set(dev, txq);
+
+	txq->ops->init(txq);
+
+	dev->data->tx_queues[tx_queue_id] = txq;
+
+l_end:
+	return ret;
+}
+
+/* Report the runtime configuration of Rx queue @queue_id in @qinfo. */
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+			struct rte_eth_rxq_info *qinfo)
+{
+	struct sxe_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->ring_depth;
+	qinfo->conf.rx_free_thresh = rxq->batch_alloc_size;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+/* Report the runtime configuration of Tx queue @queue_id in @q_info. */
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+			struct rte_eth_txq_info *q_info)
+{
+	struct sxe_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+	q_info->nb_desc = txq->ring_depth;
+	q_info->conf.tx_thresh.pthresh = txq->pthresh;
+	q_info->conf.tx_thresh.hthresh = txq->hthresh;
+	q_info->conf.tx_thresh.wthresh = txq->wthresh;
+	q_info->conf.tx_free_thresh = txq->free_thresh;
+	q_info->conf.tx_rs_thresh = txq->rs_thresh;
+	q_info->conf.offloads = txq->offloads;
+	q_info->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Reclaim up to @free_cnt processed Tx descriptors from @tx_queue.
+ *
+ * Picks the cheapest cleanup path that matches how the queue was
+ * configured: vector, simple, or full. The simple/vector paths are
+ * only valid when no Tx offloads are enabled and the RS threshold is
+ * large enough for burst operation.
+ *
+ * Returns the result of the selected cleanup helper.
+ */
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt)
+{
+	int ret;
+	struct sxe_tx_queue *txq = (struct sxe_tx_queue *)tx_queue;
+	if (txq->offloads == 0 &&
+		txq->rs_thresh >= RTE_PMD_SXE_MAX_TX_BURST) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		/* Vector path: needs SIMD-128 support (checked at runtime
+		 * except on DPDK 19.11) and an initialized vector ring in
+		 * the primary process.
+		 */
+		if (txq->rs_thresh <= RTE_SXE_MAX_TX_FREE_BUF_SZ &&
+#ifndef DPDK_19_11_6
+			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128 &&
+#endif
+			(rte_eal_process_type() != RTE_PROC_PRIMARY ||
+			txq->buffer_ring_vec != NULL)) {
+			ret = sxe_tx_done_cleanup_vec(txq, free_cnt);
+		} else{
+			ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+		}
+#else
+		ret = sxe_tx_done_cleanup_simple(txq, free_cnt);
+#endif
+
+	} else {
+		/* Offloads enabled or small RS threshold: full scan. */
+		ret = sxe_tx_done_cleanup_full(txq, free_cnt);
+	}
+
+	return ret;
+}
+
+/* Populate every descriptor of @rxq with a freshly allocated mbuf and
+ * record it in the software ring.
+ *
+ * Returns 0 on success, -ENOMEM if the mempool runs dry.
+ */
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq)
+{
+	struct sxe_rx_buffer *buf_ring = rxq->buffer_ring;
+	u16 idx;
+
+	for (idx = 0; idx < rxq->ring_depth; idx++) {
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+		volatile union sxe_rx_data_desc *desc;
+		u64 dma_addr;
+
+		if (mbuf == NULL) {
+			PMD_LOG_ERR(DRV, "rx mbuf alloc failed queue_id=%u",
+					(u16)rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		desc = &rxq->desc_ring[idx];
+		desc->read.hdr_addr = 0;
+		desc->read.pkt_addr = dma_addr;
+		buf_ring[idx].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
+/* Tear down one Rx queue: held mbufs, both software rings, the
+ * descriptor memzone, then the queue structure itself. NULL is a
+ * no-op.
+ */
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq)
+{
+	if (rxq == NULL)
+		return;
+
+	sxe_rx_queue_mbufs_free(rxq);
+	rte_free(rxq->buffer_ring);
+	rte_free(rxq->sc_buffer_ring);
+	rte_memzone_free(rxq->mz);
+	rte_free(rxq);
+}
+
+/* Tear down one Tx queue. NULL is a no-op.
+ *
+ * Fix: the original skipped ALL freeing when txq->ops was NULL,
+ * leaking the descriptor memzone and the queue structure. Only the
+ * ops-driven mbuf/ring teardown needs ops; the memzone and the struct
+ * are released whenever txq is valid.
+ */
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq)
+{
+	if (txq == NULL)
+		return;
+
+	if (txq->ops != NULL) {
+		txq->ops->mbufs_release(txq);
+		txq->ops->buffer_ring_free(txq);
+	}
+
+	rte_memzone_free(txq->mz);
+	rte_free(txq);
+}
+
+/* Drain and re-initialize every Tx and Rx ring on the port. */
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_tx_queues_clear(dev);
+	sxe_rx_queues_clear(dev, rx_batch_alloc_allowed);
+}
+
+/* Destroy every queue of @dev and reset the queue counts to zero. */
+void __sxe_queues_free(struct rte_eth_dev *dev)
+{
+	unsigned int idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		__sxe_rx_queue_free(dev->data->rx_queues[idx]);
+		dev->data->rx_queues[idx] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+		__sxe_tx_queue_free(dev->data->tx_queues[idx]);
+		dev->data->tx_queues[idx] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
+/* Secondary-process attach: re-select the Rx/Tx burst functions to
+ * match the queue state already configured by the primary process.
+ */
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	if (eth_dev->data->tx_queues != NULL) {
+		u16 last_idx = eth_dev->data->nb_tx_queues - 1;
+
+		sxe_tx_function_set(eth_dev,
+				eth_dev->data->tx_queues[last_idx]);
+	} else {
+		PMD_LOG_NOTICE(INIT, "No TX queues configured yet. "
+				"Using default TX function.");
+	}
+
+	sxe_rx_function_set(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+}
+
new file mode 100644
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_QUEUE_COMMON_H__
+#define __SXE_QUEUE_COMMON_H__
+
+#include "sxe_types.h"
+#include "sxe_compat_platform.h"
+#include "sxe_compat_version.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define RTE_PMD_SXE_MAX_RX_BURST 32
+
+enum sxe_ctxt_num {
+ SXE_CTXT_DESC_0 = 0,
+ SXE_CTXT_DESC_1 = 1,
+ SXE_CTXT_DESC_NUM = 2,
+};
+
+struct rx_setup {
+ struct rte_eth_dev *dev;
+ u16 queue_idx;
+ u16 desc_num;
+ u32 socket_id;
+ const struct rte_eth_rxconf *rx_conf;
+ struct rte_mempool *mp;
+ u8 __iomem *reg_base_addr;
+ bool *rx_batch_alloc_allowed;
+};
+
+struct tx_setup {
+ struct rte_eth_dev *dev;
+ u16 queue_idx;
+ u16 desc_num;
+ u32 socket_id;
+ const struct rte_eth_txconf *tx_conf;
+ u8 __iomem *reg_base_addr;
+};
+
+union sxe_tx_data_desc {
+ struct {
+ __le64 buffer_addr;
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd;
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+struct sxe_rx_buffer {
+ struct rte_mbuf *mbuf;
+};
+
+struct sxe_rx_queue_stats {
+ u64 csum_err;
+};
+
+union sxe_rx_data_desc {
+ struct {
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info;
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss;
+ struct {
+ __le16 ip_id;
+ __le16 csum;
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error;
+ __le16 length;
+ __le16 vlan;
+ } upper;
+ } wb;
+};
+
+struct sxe_tx_buffer {
+ struct rte_mbuf *mbuf;
+ u16 next_id;
+ u16 last_id;
+};
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+struct sxe_tx_buffer_vec {
+ struct rte_mbuf *mbuf;
+};
+#endif
+
+union sxe_tx_offload {
+ u64 data[2];
+ struct {
+ u64 l2_len:7;
+ u64 l3_len:9;
+ u64 l4_len:8;
+ u64 tso_segsz:16;
+ u64 vlan_tci:16;
+
+ u64 outer_l3_len:8;
+ u64 outer_l2_len:8;
+ };
+};
+
+struct sxe_ctxt_info {
+ u64 flags;
+ union sxe_tx_offload tx_offload;
+ union sxe_tx_offload tx_offload_mask;
+};
+
+struct sxe_tx_queue {
+ volatile union sxe_tx_data_desc *desc_ring;
+ u64 base_addr;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+ union {
+ struct sxe_tx_buffer *buffer_ring;
+ struct sxe_tx_buffer_vec *buffer_ring_vec;
+ };
+#else
+ struct sxe_tx_buffer *buffer_ring;
+#endif
+ volatile u32 *tdt_reg_addr;
+ u16 ring_depth;
+ u16 next_to_use;
+ u16 free_thresh;
+
+ u16 rs_thresh;
+
+ u16 desc_used_num;
+ u16 next_to_clean;
+ u16 desc_free_num;
+ u16 next_dd;
+ u16 next_rs;
+ u16 queue_idx;
+ u16 reg_idx;
+ u16 port_id;
+ u8 pthresh;
+ u8 hthresh;
+
+ u8 wthresh;
+ u64 offloads;
+ u32 ctx_curr;
+ struct sxe_ctxt_info ctx_cache[SXE_CTXT_DESC_NUM];
+ const struct sxe_txq_ops *ops;
+ u8 tx_deferred_start;
+ const struct rte_memzone *mz;
+};
+
+struct sxe_rx_queue {
+ struct rte_mempool *mb_pool;
+ volatile union sxe_rx_data_desc *desc_ring;
+ u64 base_addr;
+ volatile u32 *rdt_reg_addr;
+ struct sxe_rx_buffer *buffer_ring;
+ struct sxe_rx_buffer *sc_buffer_ring;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+ struct rte_mbuf *pkt_first_seg;
+ struct rte_mbuf *pkt_last_seg;
+ u64 mbuf_init_value;
+ u8 is_using_sse;
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM)
+ u16 realloc_num;
+ u16 realloc_start;
+#endif
+#endif
+ u16 ring_depth;
+ u16 processing_idx;
+ u16 hold_num;
+ u16 completed_pkts_num;
+ u16 next_ret_pkg;
+ u16 batch_alloc_trigger;
+
+ u16 batch_alloc_size;
+ u16 queue_id;
+ u16 reg_idx;
+ u16 pkt_type_mask;
+ u16 port_id;
+ u8 crc_len;
+ u8 drop_en;
+ u8 deferred_start;
+ u64 vlan_flags;
+ u64 offloads;
+ struct rte_mbuf fake_mbuf;
+ struct rte_mbuf *completed_ring[RTE_PMD_SXE_MAX_RX_BURST * 2];
+ const struct rte_memzone *mz;
+ struct sxe_rx_queue_stats rx_stats;
+};
+
+struct sxe_txq_ops {
+ void (*init)(struct sxe_tx_queue *txq);
+ void (*mbufs_release)(struct sxe_tx_queue *txq);
+ void (*buffer_ring_free)(struct sxe_tx_queue *txq);
+};
+
+s32 __rte_cold __sxe_rx_queue_setup(struct rx_setup *rx_setup, bool is_vf);
+
+int __rte_cold __sxe_tx_queue_setup(struct tx_setup *tx_setup, bool is_vf);
+
+void __sxe_rx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void __sxe_tx_queue_info_get(struct rte_eth_dev *dev, u16 queue_id,
+ struct rte_eth_txq_info *q_info);
+
+s32 __sxe_tx_done_cleanup(void *tx_queue, u32 free_cnt);
+
+s32 __rte_cold __sxe_rx_queue_mbufs_alloc(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_tx_queue_free(struct sxe_tx_queue *txq);
+
+void sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_rx_queue_free(struct sxe_rx_queue *rxq);
+
+void __rte_cold __sxe_txrx_queues_clear(struct rte_eth_dev *dev, bool rx_batch_alloc_allowed);
+
+void __sxe_queues_free(struct rte_eth_dev *dev);
+
+void __sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+ bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif
new file mode 100644
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+#include <rte_prefetch.h>
+#include <rte_malloc.h>
+
+#include "sxe.h"
+#include "sxe_rx.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_queue_common.h"
+#include "sxe_vf.h"
+#include "sxe_errno.h"
+#include "sxe_irq.h"
+#include "sxe_rx_common.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#include "sxe_vec_common.h"
+#include "rte_vect.h"
+#endif
+
+/* Hide memory latency in the Rx loop: always prefetch the next entry's
+ * mbuf, and on every 4th index additionally prefetch the descriptor and
+ * buffer-ring cache lines about to be touched.
+ */
+static inline void sxe_rx_resource_prefetch(u16 next_idx,
+					struct sxe_rx_buffer *buf_ring,
+					volatile union sxe_rx_data_desc *desc_ring)
+{
+	rte_sxe_prefetch(buf_ring[next_idx].mbuf);
+
+	/* Descriptors are packed 4 per cache line. */
+	if ((next_idx & 0x3) == 0) {
+		rte_sxe_prefetch(&desc_ring[next_idx]);
+		rte_sxe_prefetch(&buf_ring[next_idx]);
+	}
+
+}
+
+/* Choose dev->rx_pkt_burst for the port.
+ *
+ * Priority: LRO (bulk or single alloc) > scattered (vector, bulk, or
+ * single alloc) > vector > bulk alloc > plain single-alloc receive.
+ * Afterwards the SSE decision is recorded in every Rx queue so the
+ * descriptor-status path knows which hold counter applies.
+ */
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev,
+				bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	u16 i, is_using_sse;
+
+	/* Vector Rx needs the arch/condition check, bulk alloc, and (on
+	 * DPDK > 19.11) at least 128-bit SIMD enabled at runtime.
+	 */
+	if (sxe_rx_vec_condition_check(dev) ||
+		!rx_batch_alloc_allowed
+#ifndef DPDK_19_11_6
+		|| rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128
+#endif
+		) {
+		PMD_LOG_DEBUG(INIT, "Port[%d] doesn't meet Vector Rx "
+			"preconditions", dev->data->port_id);
+		*rx_vec_allowed = false;
+	}
+#else
+	UNUSED(rx_vec_allowed);
+#endif
+
+	if (dev->data->lro) {
+		if (rx_batch_alloc_allowed) {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a bulk "
+				"allocation version");
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "LRO is requested. Using a single "
+				"allocation version");
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	} else if (dev->data->scattered_rx) {
+		/* The paired #if blocks below keep the if/else-if chain
+		 * syntactically valid with and without SIMD support.
+		 */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		if (*rx_vec_allowed) {
+			PMD_LOG_DEBUG(INIT, "Using Vector Scattered Rx "
+					"callback (port=%d).",
+					dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_scattered_pkts_vec_recv;
+
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+
+		} else if (rx_batch_alloc_allowed) {
+#else
+		if (rx_batch_alloc_allowed) {
+#endif
+
+			PMD_LOG_DEBUG(INIT, "Using a Scattered with bulk "
+				"allocation callback (port=%d).",
+				dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_batch_alloc_lro_pkts_recv;
+		} else {
+			PMD_LOG_DEBUG(INIT, "Using Regular (non-vector, "
+					"single allocation) "
+					"Scattered Rx callback "
+					"(port=%d).",
+					dev->data->port_id);
+
+			dev->rx_pkt_burst = sxe_single_alloc_lro_pkts_recv;
+		}
+	}
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	else if (*rx_vec_allowed) {
+		PMD_LOG_DEBUG(INIT, "Vector rx enabled, please make sure RX "
+				"burst size no less than %d (port=%d).",
+				SXE_DESCS_PER_LOOP,
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_vec_recv;
+	}
+#endif
+	else if (rx_batch_alloc_allowed) {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are "
+				"satisfied. Rx Burst Bulk Alloc function "
+				"will be used on port=%d.",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_batch_alloc_pkts_recv;
+	} else {
+		PMD_LOG_DEBUG(INIT, "Rx Burst Bulk Alloc Preconditions are not "
+				"satisfied, or Scattered Rx is requested "
+				"(port=%d).",
+				dev->data->port_id);
+
+		dev->rx_pkt_burst = sxe_pkts_recv;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	/* Record whether a vector burst was chosen in each queue. */
+	is_using_sse =
+		(dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv ||
+		dev->rx_pkt_burst == sxe_pkts_vec_recv);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct sxe_rx_queue *rxq = dev->data->rx_queues[i];
+
+		rxq->is_using_sse = is_using_sse;
+	}
+#endif
+
+}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+/* Legacy rx_descriptor_done callback (removed in newer DPDK): return 1
+ * if the descriptor at `offset` from the current head has its DD bit
+ * set, 0 otherwise (including out-of-range offsets).
+ */
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset)
+{
+	volatile union sxe_rx_data_desc *desc;
+	struct sxe_rx_queue *rxq = rx_queue;
+	u32 index;
+	s32 is_done = 0;
+
+	LOG_DEBUG("check rx queue[%u], offset desc[%u]\n",
+			rxq->queue_id, offset);
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("offset=%u >= ring depth=%u\n",
+				offset, rxq->ring_depth);
+		goto l_end;
+	}
+
+	/* Wrap the index around the ring. */
+	index = rxq->processing_idx + offset;
+	if (index >= rxq->ring_depth)
+		index -= rxq->ring_depth;
+
+	desc = &rxq->desc_ring[index];
+	is_done = !!(desc->wb.upper.status_error &
+			rte_cpu_to_le_32(SXE_RXDADV_STAT_DD));
+
+l_end:
+	return is_done;
+}
+#endif
+
+/* rx_descriptor_status callback: classify the descriptor at `offset`
+ * as DONE (written back), AVAIL, or UNAVAIL (held by software awaiting
+ * tail-pointer recycle); -EINVAL for out-of-range offsets.
+ */
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset)
+{
+	int ret = RTE_ETH_RX_DESC_AVAIL;
+	struct sxe_rx_queue *rxq = rx_queue;
+	volatile u32 *status;
+	u32 hold_num, desc;
+
+	if (unlikely(offset >= rxq->ring_depth)) {
+		LOG_DEBUG("rx queue[%u] get desc status err,"
+			"offset=%u >= ring_depth=%u\n",
+			rxq->queue_id, offset, rxq->ring_depth);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	/* Vector Rx tracks held descriptors in realloc_num instead of
+	 * hold_num.
+	 * NOTE(review): this check is x86-only, but the queue struct
+	 * declares realloc_num for RTE_ARCH_ARM as well -- confirm ARM
+	 * vector Rx should not take the same branch.
+	 */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	if (rxq->is_using_sse)
+		hold_num = rxq->realloc_num;
+	else
+#endif
+#endif
+		hold_num = rxq->hold_num;
+	if (offset >= rxq->ring_depth - hold_num) {
+		ret = RTE_ETH_RX_DESC_UNAVAIL;
+		goto l_end;
+	}
+
+	desc = rxq->processing_idx + offset;
+	if (desc >= rxq->ring_depth)
+		desc -= rxq->ring_depth;
+
+	status = &rxq->desc_ring[desc].wb.upper.status_error;
+	if (*status & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD))
+		ret = RTE_ETH_RX_DESC_DONE;
+
+l_end:
+	LOG_DEBUG("rx queue[%u] get desc status=%d\n", rxq->queue_id, ret);
+	return ret;
+}
+
+/* Basic (non-vector, single-segment) Rx burst.
+ *
+ * For each DD-ready descriptor: allocate a replacement mbuf, swap it
+ * into the buffer ring, re-arm the descriptor with the new DMA address,
+ * and hand the received mbuf to the caller with header fields filled
+ * from the write-back. Tail-register updates are batched: the RDT is
+ * only written once more than batch_alloc_size descriptors are held.
+ *
+ * Returns the number of packets stored in rx_pkts (<= pkts_num).
+ */
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+			u16 pkts_num)
+{
+	struct sxe_rx_queue *rxq = (struct sxe_rx_queue *)rx_queue;
+	volatile union sxe_rx_data_desc *desc_ring = rxq->desc_ring;
+	volatile union sxe_rx_data_desc *cur_desc;
+	struct sxe_rx_buffer *buff_ring = rxq->buffer_ring;
+	struct sxe_rx_buffer *cur_buf;
+	struct rte_mbuf *cur_mb;
+	struct rte_mbuf *new_mb;
+	union sxe_rx_data_desc rxd;
+	u16 processing_idx = rxq->processing_idx;
+	u64 dma_addr;
+	u32 staterr;
+	u32 pkt_info;
+	u16 done_num = 0;
+	u16 hold_num = 0;
+	u16 pkt_len;
+
+	while (done_num < pkts_num) {
+		cur_desc = &desc_ring[processing_idx];
+		staterr = cur_desc->wb.upper.status_error;
+		/* DD clear: hardware has not written this slot back yet. */
+		if (!(staterr & rte_cpu_to_le_32(SXE_RXDADV_STAT_DD)))
+			break;
+
+		/* Copy the volatile descriptor once, then work on the copy. */
+		rxd = *cur_desc;
+
+		LOG_DEBUG("port_id=%u queue_id=%u processing_idx=%u "
+			"staterr=0x%08x pkt_len=%u",
+			(unsigned int)rxq->port_id, (unsigned int) rxq->queue_id,
+			(unsigned int)processing_idx, (unsigned int) staterr,
+			(unsigned int)rte_le_to_cpu_16(rxd.wb.upper.length));
+
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (new_mb == NULL) {
+			/* Out of mbufs: stop the burst, keep the descriptor
+			 * armed so it can be retried on the next call.
+			 */
+			LOG_ERROR("RX mbuf alloc failed port_id=%u "
+				"queue_id=%u", (unsigned int) rxq->port_id,
+				(unsigned int) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		hold_num++;
+		cur_buf = &buff_ring[processing_idx];
+		processing_idx++;
+		if (processing_idx == rxq->ring_depth)
+			processing_idx = 0;
+
+		sxe_rx_resource_prefetch(processing_idx, buff_ring, desc_ring);
+
+		/* Swap: hand out the filled mbuf, re-arm with the new one. */
+		cur_mb = cur_buf->mbuf;
+		cur_buf->mbuf = new_mb;
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+		cur_desc->read.hdr_addr = 0;
+		cur_desc->read.pkt_addr = dma_addr;
+
+		cur_mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)cur_mb->buf_addr + cur_mb->data_off);
+		cur_mb->nb_segs = 1;
+		cur_mb->next = NULL;
+		/* Hardware length includes CRC; strip it. */
+		pkt_len = (u16)(rte_le_to_cpu_16(rxd.wb.upper.length) -
+				rxq->crc_len);
+		cur_mb->pkt_len = pkt_len;
+		cur_mb->data_len = pkt_len;
+
+		pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+
+		sxe_rx_mbuf_common_header_fill(rxq, cur_mb, rxd, pkt_info, staterr);
+
+		rx_pkts[done_num++] = cur_mb;
+	}
+
+	rxq->processing_idx = processing_idx;
+
+	/* Defer the RDT write until enough descriptors are recycled. */
+	hold_num = (u16) (hold_num + rxq->hold_num);
+	if (hold_num > rxq->batch_alloc_size) {
+		LOG_DEBUG("port_id=%u queue_id=%u rx_tail=%u "
+			"num_hold=%u num_done=%u",
+			(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
+			(unsigned int)processing_idx, (unsigned int)hold_num,
+			(unsigned int)done_num);
+		/* Tail must point one behind the next slot to be armed. */
+		processing_idx = (u16)((processing_idx == 0) ?
+				(rxq->ring_depth - 1) : (processing_idx - 1));
+		SXE_PCI_REG_WC_WRITE(rxq->rdt_reg_addr, processing_idx);
+		hold_num = 0;
+	}
+
+	rxq->hold_num = hold_num;
+	return done_num;
+}
+
+/* Return the packet-type list the active Rx burst can parse, or NULL
+ * when an unrecognized burst callback is installed.
+ */
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	const u32 *ptypes = NULL;
+	static const u32 ptypes_arr[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_TUNNEL_IP,
+		RTE_PTYPE_INNER_L3_IPV6,
+		RTE_PTYPE_INNER_L3_IPV6_EXT,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	/* Scalar burst variants share the same ptype coverage. */
+	if (dev->rx_pkt_burst == sxe_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_pkts_recv ||
+		dev->rx_pkt_burst == sxe_single_alloc_lro_pkts_recv ||
+		dev->rx_pkt_burst == sxe_batch_alloc_lro_pkts_recv) {
+		ptypes = ptypes_arr;
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+#if defined(RTE_ARCH_X86)
+	/* x86 vector variants report the same list. */
+	if (dev->rx_pkt_burst == sxe_pkts_vec_recv ||
+		dev->rx_pkt_burst == sxe_scattered_pkts_vec_recv) {
+		ptypes = ptypes_arr;
+	}
+#endif
+#endif
+
+l_end:
+	return ptypes;
+}
+
new file mode 100644
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_RX_COMMON_H__
+#define __SXE_RX_COMMON_H__
+
+#include "sxe_dpdk_version.h"
+
+/* Install the Rx burst callback matching the port configuration. */
+void __rte_cold __sxe_rx_function_set(struct rte_eth_dev *dev,
+				bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+/* Legacy descriptor-done API, only on DPDK 19.11/20.11. */
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+s32 __sxe_rx_descriptor_done(void *rx_queue, u16 offset);
+#endif
+
+s32 __sxe_rx_descriptor_status(void *rx_queue, u16 offset);
+
+/* Scalar single-segment Rx burst. */
+u16 __sxe_pkts_recv(void *rx_queue, struct rte_mbuf **rx_pkts,
+		u16 pkts_num);
+
+const u32 *__sxe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+#endif
+
new file mode 100644
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#endif
+#include <rte_net.h>
+
+#include "sxe_hw.h"
+#include "sxe_logs.h"
+#include "sxe_queue_common.h"
+#include "sxe_tx_common.h"
+
+/* tx_descriptor_status callback: report whether the descriptor at
+ * `offset` from next_to_use has been written back (DONE) or is still
+ * outstanding (FULL); -EINVAL for out-of-range offsets.
+ */
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset)
+{
+	int ret = RTE_ETH_TX_DESC_FULL;
+	u32 desc_idx;
+	struct sxe_tx_queue *txq = tx_queue;
+	volatile u32 *status;
+
+	if (unlikely(offset >= txq->ring_depth)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	desc_idx = txq->next_to_use + offset;
+
+	/* Only descriptors with RS set are written back; round up to the
+	 * next rs_thresh boundary where the DD bit will appear.
+	 */
+	desc_idx = ((desc_idx + txq->rs_thresh - 1) / txq->rs_thresh) * txq->rs_thresh;
+	if (desc_idx >= txq->ring_depth) {
+		desc_idx -= txq->ring_depth;
+		/* Rounding up may overshoot by a full ring once more. */
+		if (desc_idx >= txq->ring_depth)
+			desc_idx -= txq->ring_depth;
+	}
+
+	status = &txq->desc_ring[desc_idx].wb.status;
+	if (*status & rte_cpu_to_le_32(SXE_TX_DESC_STAT_DD))
+		ret = RTE_ETH_TX_DESC_DONE;
+
+l_end:
+	return ret;
+}
+
new file mode 100644
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TX_COMMON_H__
+#define __SXE_TX_COMMON_H__
+
+/* ethdev tx_descriptor_status callback. */
+int __sxe_tx_descriptor_status(void *tx_queue, u16 offset);
+
+/* Tx burst with full offload support. */
+u16 __sxe_pkts_xmit_with_offload(void *tx_queue, struct rte_mbuf **tx_pkts, u16 pkts_num);
+
+#endif
new file mode 100644
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_DPDK_TYPES_H__
+#define __SXE_DPDK_TYPES_H__
+
+#include <sys/time.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_common.h>
+
+/* Fixed-width shorthand aliases used throughout the sxe driver. */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+/* Use int8_t rather than plain char: char signedness is
+ * implementation-defined (unsigned on ARM/AArch64, which this driver
+ * supports), and s8 is a signed 8-bit integer type.
+ */
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef s8 S8;
+typedef s16 S16;
+typedef s32 S32;
+
+/* Little/big-endian annotations; carry no enforcement, documentation only. */
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+#endif
new file mode 100644
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#include <linux/etherdevice.h>
+
+#include "sxevf_hw.h"
+#include "sxevf_regs.h"
+#include "sxe_log.h"
+#include "sxevf_irq.h"
+#include "sxevf_msg.h"
+#include "sxevf_ring.h"
+#include "sxevf.h"
+#include "sxevf_rx_proc.h"
+#else
+#include "sxe_errno.h"
+#include "sxe_logs.h"
+#include "sxe_dpdk_version.h"
+#include "sxe_compat_version.h"
+#include "sxevf.h"
+#include "sxevf_hw.h"
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+struct sxevf_adapter;
+#endif
+
+/* Build an n-bit DMA address mask; n == 64 must avoid the UB of a
+ * 64-bit shift by 64.
+ */
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_MASK_NONE 0x0ULL
+
+/* Retries for a register read that returns the all-ones fault value. */
+#define SXEVF_REG_READ_CNT 5
+
+#define SXE_REG_READ_FAIL 0xffffffffU
+
+#define SXEVF_RING_WAIT_LOOP (100)
+#define SXEVF_MAX_RX_DESC_POLL (10)
+
+
+#define SXEVF_REG_READ(hw, addr) sxevf_reg_read(hw, addr)
+#define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value)
+/* Reading VFSTATUS forces posted writes out to the device. */
+#define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS)
+
+#ifndef SXE_DPDK
+/* Latch the hardware-fault state (once) and invoke the registered
+ * fault callback, if any.
+ */
+void sxevf_hw_fault_handle(struct sxevf_hw *hw)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	/* Already faulted: nothing further to do. */
+	if (test_bit(SXEVF_HW_FAULT, &hw->state))
+		return;
+
+	set_bit(SXEVF_HW_FAULT, &hw->state);
+
+	LOG_DEV_ERR("sxe nic hw fault\n");
+
+	if ((hw->fault_handle != NULL) && (hw->priv != NULL))
+		hw->fault_handle(hw->priv);
+
+}
+
+/* After a read of `reg` returned the all-ones fault value, decide
+ * whether the device is really gone: re-read VFSTATUS a few times with
+ * delays, and declare a hardware fault if it never recovers.
+ */
+static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+	u8 i;
+
+	/* A failed VFSTATUS read is itself conclusive. */
+	if (reg == SXE_VFSTATUS)
+		sxevf_hw_fault_handle(hw);
+
+
+	for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+		value = hw->reg_read(base_addr + SXE_VFSTATUS);
+
+		if (value != SXEVF_REG_READ_FAIL)
+			break;
+
+		mdelay(20);
+	}
+
+	LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value);
+
+	if (value == SXEVF_REG_READ_FAIL)
+		sxevf_hw_fault_handle(hw);
+
+}
+
+/* Kernel-driver register read: short-circuit to the fault value if the
+ * device is already marked faulted; otherwise read and, on an all-ones
+ * result, run the fault check.
+ */
+static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		value = SXEVF_REG_READ_FAIL;
+		goto l_ret;
+	}
+
+	value = hw->reg_read(base_addr + reg);
+	if (unlikely(value == SXEVF_REG_READ_FAIL)) {
+		LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value);
+		sxevf_hw_fault_check(hw, reg);
+	}
+
+l_ret:
+	return value;
+}
+
+/* Kernel-driver register write; silently dropped once the device is
+ * marked faulted.
+ */
+static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	if (sxevf_is_hw_fault(hw))
+		return;
+
+	hw->reg_write(value, base_addr + reg);
+
+}
+
+#else
+
+/* DPDK register read: retry up to SXEVF_REG_READ_CNT times (with small
+ * delays) when the read returns the all-ones fault value; the last
+ * value read is returned either way.
+ */
+static u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg)
+{
+	u32 i, value;
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+	if (unlikely(value == SXEVF_REG_READ_FAIL)) {
+		for (i = 0; i < SXEVF_REG_READ_CNT; i++) {
+			LOG_ERROR("reg[0x%x] read failed, value=%#x\n",
+					reg, value);
+			value = rte_le_to_cpu_32(rte_read32(base_addr + reg));
+			if (value != SXEVF_REG_READ_FAIL) {
+				LOG_INFO("reg[0x%x] read ok, value=%#x\n",
+						reg, value);
+				break;
+			}
+
+			mdelay(3);
+		}
+	}
+
+	return value;
+}
+
+/* DPDK register write (little-endian, via rte_write32). */
+static void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value)
+{
+	u8 __iomem *base_addr = hw->reg_base_addr;
+
+	rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg));
+
+}
+#endif
+
+/* Quiesce the VF: disable every Rx ring, mask and clear interrupts,
+ * then disable every Tx ring.
+ */
+void sxevf_hw_stop(struct sxevf_hw *hw)
+{
+	u8 i;
+	u32 value;
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFRXDCTL(i));
+		if (value & SXE_VFRXDCTL_ENABLE) {
+			value &= ~SXE_VFRXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), value);
+		}
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	/* Mask all interrupt causes and drain any pending ones. */
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, SXEVF_VFEIMC_IRQ_MASK);
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+	for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) {
+		value = SXEVF_REG_READ(hw, SXE_VFTXDCTL(i));
+		if (value & SXE_VFTXDCTL_ENABLE) {
+			value &= ~SXE_VFTXDCTL_ENABLE;
+			SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), value);
+		}
+	}
+
+}
+
+/* Write one 32-bit word into mailbox memory slot `index`. */
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg)
+{
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	SXEVF_REG_WRITE(hw, SXE_VFMBMEM + (index << 2), msg);
+
+	LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg);
+
+}
+
+/* Read one 32-bit word from mailbox memory slot `index`. */
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index)
+{
+	u32 value = SXEVF_REG_READ(hw, SXE_VFMBMEM + (index << 2));
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	LOG_DEBUG_BDF("index:%u read mbx mem:0x%x.\n", index, value);
+
+	return value;
+}
+
+/* Raw access to the VF mailbox control register. */
+u32 sxevf_mailbox_read(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFMAILBOX);
+}
+
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value);
+}
+
+/* Signal the PF that a request message is ready. */
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ);
+
+}
+
+/* Acknowledge a message received from the PF. */
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK);
+
+}
+
+/* Map the misc/event (mailbox) interrupt to MSI-X `vector` via the
+ * low byte of VFIVAR_MISC, with the valid bit set.
+ */
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector)
+{
+	u8 allocation;
+	u32 ivar;
+
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR_MISC);
+	ivar &= ~0xFF;
+	ivar |= allocation;
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar);
+
+}
+
+/* Unmask only the causes in `value` (EIMS set). */
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, value);
+
+}
+
+/* Enable auto-mask and unmask the causes in `mask`. */
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask);
+
+}
+
+/* Mask every interrupt cause and flush the writes. */
+void sxevf_irq_disable(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEIAM, 0);
+	SXEVF_REG_WRITE(hw, SXE_VFEIMC, ~0);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+}
+
+/* Map a ring interrupt to MSI-X `vector`: each VFIVAR register holds
+ * four byte-wide entries (two rings x Rx/Tx); the byte position is
+ * derived from the ring index parity and the Rx/Tx direction.
+ */
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector)
+{
+	u8 allocation;
+	u32 ivar, position;
+
+	allocation = vector | SXEVF_IVAR_ALLOC_VALID;
+
+	position = ((hw_ring_idx & 1) * 16) + (8 * is_tx);
+
+	ivar = SXEVF_REG_READ(hw, SXE_VFIVAR(hw_ring_idx >> 1));
+	ivar &= ~(0xFF << position);
+	ivar |= (allocation << position);
+
+	SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar);
+
+}
+
+/* Program the ring interrupt throttle interval; CNT_WDIS keeps the
+ * counter from being reset by the write.
+ */
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval)
+{
+	u32 eitr = interval & SXEVF_EITR_ITR_MASK;
+
+	eitr |= SXEVF_EITR_CNT_WDIS;
+
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr);
+
+}
+
+/* Raw EITR write for the event interrupt (no masking applied). */
+static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value);
+
+}
+
+/* Reading EICR clears any pending interrupt causes. */
+static void sxevf_pending_irq_clear(struct sxevf_hw *hw)
+{
+	SXEVF_REG_READ(hw, SXE_VFEICR);
+
+}
+
+/* Software-trigger the interrupt causes given in `eics`. */
+static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFEICS, eics);
+
+}
+
+/* Interrupt operation table wired into hw->irq.ops. */
+static const struct sxevf_irq_operations sxevf_irq_ops = {
+	.ring_irq_interval_set = sxevf_ring_irq_interval_set,
+	.event_irq_interval_set = sxevf_event_irq_interval_set,
+	.ring_irq_map = sxevf_hw_ring_irq_map,
+	.event_irq_map = sxevf_event_irq_map,
+	.pending_irq_clear = sxevf_pending_irq_clear,
+	.ring_irq_trigger = sxevf_ring_irq_trigger,
+	.specific_irq_enable = sxevf_specific_irq_enable,
+	.irq_enable = sxevf_irq_enable,
+	.irq_disable = sxevf_irq_disable,
+};
+
+/* Request a VF function-level reset and flush the write. */
+void sxevf_hw_reset(struct sxevf_hw *hw)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST);
+	SXEVF_WRITE_FLUSH(hw);
+
+}
+
+/* Reset completes when hardware clears the RST bit. */
+static bool sxevf_hw_rst_done(struct sxevf_hw *hw)
+{
+	return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST);
+}
+
+u32 sxevf_link_state_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFLINKS);
+}
+
+/* Registers included in a register dump (see sxevf_reg_dump). */
+u32 dump_regs[] = {
+	SXE_VFCTRL,
+};
+
+u16 sxevf_reg_dump_num_get(void)
+{
+	return ARRAY_SIZE(dump_regs);
+}
+
+/* Fill regs_buff with the registers listed in dump_regs[]; returns the
+ * number of entries written.
+ */
+static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size)
+{
+	u32 i;
+	u32 regs_num = buf_size / sizeof(u32);
+
+	/* Clamp to the dump table size: an oversized caller buffer must
+	 * not drive an out-of-bounds read of dump_regs[].
+	 */
+	if (regs_num > ARRAY_SIZE(dump_regs))
+		regs_num = ARRAY_SIZE(dump_regs);
+
+	for (i = 0; i < regs_num; i++)
+		regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]);
+
+	return i;
+}
+
+/* Self-test methods for sxevf_regs_test(). */
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* One self-test entry: register base, number of instances (stride per
+ * test type), method, read-back mask and write pattern.
+ */
+struct sxevf_self_test_reg {
+	u32 reg;
+	u8 array_len;
+	u8 test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* Diagnostic register table; terminated by a zero .reg entry. */
+static const struct sxevf_self_test_reg self_test_reg[] = {
+	{ SXE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ SXE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, SXEVF_RXDCTL_ENABLE },
+	{ SXE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+	{ SXE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ SXE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ SXE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ .reg = 0 }
+};
+
+/* Write each test pattern (masked by `write`) to `reg`, verify the
+ * read-back matches under `mask`, and restore the original value.
+ * Returns 0 on success or a negative diagnostic error code.
+ */
+static s32 sxevf_reg_pattern_test(struct sxevf_hw *hw, u32 reg,
+					u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 pat, val, before;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE};
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		/* Preserve the live value so the test is non-destructive. */
+		before = SXEVF_REG_READ(hw, reg);
+
+		SXEVF_REG_WRITE(hw, reg, test_pattern[pat] & write);
+		val = SXEVF_REG_READ(hw, reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			LOG_MSG_ERR(drv, "pattern test reg %04X failed: "
+				"got 0x%08X expected 0x%08X\n",
+				reg, val, (test_pattern[pat] & write & mask));
+			SXEVF_REG_WRITE(hw, reg, before);
+			ret = -SXEVF_DIAG_REG_PATTERN_TEST_ERR;
+			goto l_end;
+		}
+
+		SXEVF_REG_WRITE(hw, reg, before);
+	}
+
+l_end:
+	return ret;
+}
+
+/* Single set/read-back check of `reg` under `mask`; restores the
+ * original value afterwards. Returns 0 or a negative diagnostic code.
+ */
+static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg,
+					u32 mask, u32 write)
+{
+	s32 ret = 0;
+	u32 val, before;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (sxevf_is_hw_fault(hw)) {
+		LOG_ERROR_BDF("hw fault\n");
+		ret = -SXEVF_DIAG_TEST_BLOCKED;
+		goto l_end;
+	}
+
+	before = SXEVF_REG_READ(hw, reg);
+	SXEVF_REG_WRITE(hw, reg, write & mask);
+	val = SXEVF_REG_READ(hw, reg);
+	if ((write & mask) != (val & mask)) {
+		LOG_DEV_ERR("set/check reg %04X test failed: "
+			"got 0x%08X expected 0x%08X\n",
+			reg, (val & mask), (write & mask));
+		SXEVF_REG_WRITE(hw, reg, before);
+		ret = -SXEVF_DIAG_CHECK_REG_TEST_ERR;
+		goto l_end;
+	}
+
+	SXEVF_REG_WRITE(hw, reg, before);
+
+l_end:
+	return ret;
+}
+
+/* Run the whole self_test_reg[] table; each entry is applied to
+ * array_len register instances using the stride implied by its test
+ * type (0x40 for ring registers, 4/8 for tables). Stops at the first
+ * failure and returns its error code, 0 on full success.
+ */
+static s32 sxevf_regs_test(struct sxevf_hw *hw)
+{
+	u32 i;
+	s32 ret = 0;
+	const struct sxevf_self_test_reg *test = self_test_reg;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case TABLE32_TEST:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 4),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				ret = sxevf_reg_pattern_test(hw,
+					test->reg + (i * 8),
+					test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				ret = sxevf_reg_pattern_test(hw,
+					(test->reg + 4) + (i * 8),
+					test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				ret = sxevf_reg_set_and_check(hw,
+					test->reg + (i * 0x40),
+					test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				/* Blind write; no verification possible. */
+				SXEVF_REG_WRITE(hw, test->reg + (i * 0x40),
+						test->write);
+				break;
+			default:
+				LOG_ERROR_BDF("reg test mod err, type=%d\n",
+						test->test_type);
+				break;
+			}
+
+			if (ret)
+				goto l_end;
+
+		}
+		test++;
+	}
+
+l_end:
+	return ret;
+}
+
+/* Device setup/diagnostic operation table wired into hw->setup.ops. */
+static const struct sxevf_setup_operations sxevf_setup_ops = {
+	.reset = sxevf_hw_reset,
+	.hw_stop = sxevf_hw_stop,
+	.regs_test = sxevf_regs_test,
+	.regs_dump = sxevf_reg_dump,
+	.link_state_get = sxevf_link_state_get,
+	.reset_done = sxevf_hw_rst_done,
+};
+
+/* Program a Tx ring's descriptor base address (split low/high), length,
+ * and reset its head/tail pointers.
+ */
+static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr &
+				DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+
+}
+
+/* Disable head write-back for the ring by zeroing its WB address. */
+static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0);
+
+}
+
+/* Pack the write-back, host and prefetch thresholds into TXDCTL. */
+static void sxevf_tx_desc_thresh_set(
+			struct sxevf_hw *hw,
+			u8 reg_idx,
+			u32 wb_thresh,
+			u32 host_thresh,
+			u32 prefech_thresh)
+{
+	u32 txdctl = 0;
+
+	txdctl |= (wb_thresh << SXEVF_TXDCTL_WTHRESH_SHIFT);
+	txdctl |= (host_thresh << SXEVF_TXDCTL_HTHRESH_SHIFT) |
+			prefech_thresh;
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+}
+
+/* Enable or disable a Tx ring and poll TXDCTL until the ENABLE bit
+ * reflects the requested state (bounded by SXEVF_MAX_TXRX_DESC_POLL).
+ */
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 wait_loop = SXEVF_MAX_TXRX_DESC_POLL;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+	if (is_on) {
+		txdctl |= SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && !(txdctl & SXEVF_TXDCTL_ENABLE));
+	} else {
+		txdctl &= ~SXEVF_TXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+		} while (--wait_loop && (txdctl & SXEVF_TXDCTL_ENABLE));
+	}
+
+	/* Timed out: log only; caller has no error path here. */
+	if (!wait_loop) {
+		LOG_DEV_ERR("tx ring %u switch %u failed within "
+			"the polling period\n", reg_idx, is_on);
+	}
+
+}
+
+/* Clear RXDCTL.ENABLE for the ring and poll (10us steps) until the
+ * hardware confirms the ring is off; no-op if the BAR is unmapped.
+ */
+static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RX_RING_POLL_MAX;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	if (!hw->reg_base_addr)
+		return;
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	rxdctl &= ~SXE_VFRXDCTL_ENABLE;
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+	do {
+		udelay(10);
+		rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & SXE_VFRXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		LOG_ERROR_BDF("RXDCTL.ENABLE queue %d not cleared while polling\n",
+			reg_idx);
+	}
+
+}
+
+/* Enable (with VLAN-strip, VME) or disable an Rx ring and poll RXDCTL
+ * until the ENABLE bit matches, then flush posted writes.
+ */
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on)
+{
+	u32 rxdctl;
+	u32 wait_loop = SXEVF_RING_WAIT_LOOP;
+	struct sxevf_adapter *adapter = hw->adapter;
+
+	rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+	if (is_on) {
+		rxdctl |= SXEVF_RXDCTL_ENABLE | SXEVF_RXDCTL_VME;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && !(rxdctl & SXEVF_RXDCTL_ENABLE));
+	} else {
+		rxdctl &= ~SXEVF_RXDCTL_ENABLE;
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl);
+
+		do {
+			usleep_range(1000, 2000);
+			rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx));
+		} while (--wait_loop && (rxdctl & SXEVF_RXDCTL_ENABLE));
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	if (!wait_loop) {
+		LOG_DEV_ERR("rx ring %u switch %u failed within "
+			"the polling period\n", reg_idx, is_on);
+	}
+
+}
+
+/* Program an Rx ring's descriptor base/length, flush, then reset its
+ * head and tail pointers.
+ */
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+					u64 desc_dma_addr, u8 reg_idx)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAL(reg_idx),
+			(desc_dma_addr & DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXE_VFRDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXE_VFRDLEN(reg_idx), desc_mem_len);
+
+	SXEVF_WRITE_FLUSH(hw);
+
+	SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0);
+
+}
+
+/* Build and write SRRCTL for the ring: optional drop-enable, header
+ * buffer size, and packet buffer size (in 1KB units after the shift).
+ */
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+			   u32 header_buf_len, u32 pkg_buf_len, bool drop_en)
+{
+	u32 srrctl = 0;
+
+	if (drop_en)
+		srrctl = SXEVF_SRRCTL_DROP_EN;
+
+	srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+		SXEVF_SRRCTL_BSIZEHDR_MASK);
+	srrctl |= ((pkg_buf_len >> SXEVF_SRRCTL_BSIZEPKT_SHIFT) &
+		SXEVF_SRRCTL_BSIZEPKT_MASK);
+
+	SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl);
+
+}
+
+/* Report the current head/tail pointers of a Tx ring. */
+static void sxevf_tx_ring_info_get(struct sxevf_hw *hw,
+				u8 idx, u32 *head, u32 *tail)
+{
+	*head = SXEVF_REG_READ(hw, SXE_VFTDH(idx));
+	*tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx));
+
+}
+
+/* DMA/ring operation table wired into hw->dma.ops. */
+static const struct sxevf_dma_operations sxevf_dma_ops = {
+	.tx_ring_desc_configure = sxevf_tx_ring_desc_configure,
+	.tx_writeback_off = sxevf_tx_writeback_off,
+	.tx_desc_thresh_set = sxevf_tx_desc_thresh_set,
+	.tx_ring_switch = sxevf_tx_ring_switch,
+	.tx_ring_info_get = sxevf_tx_ring_info_get,
+
+	.rx_disable = sxevf_rx_disable,
+	.rx_ring_switch = sxevf_rx_ring_switch,
+	.rx_ring_desc_configure = sxevf_rx_ring_desc_configure,
+	.rx_rcv_ctl_configure = sxevf_rx_rcv_ctl_configure,
+};
+
+#ifdef SXE_DPDK
+/* DPDK variant: counters are accumulated deltas. The unsigned
+ * subtraction handles a single 32-bit hardware wrap between samples.
+ */
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur) \
+	{ \
+		u32 latest = SXEVF_REG_READ(hw, reg); \
+		cur += (latest - last) & UINT_MAX; \
+	 last = latest; \
+	}
+
+/* 36-bit counters are split across LSB/MSB registers; the delta is
+ * computed modulo 2^36 to absorb one wrap.
+ */
+#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur) \
+	{ \
+		u64 new_lsb = SXEVF_REG_READ(hw, lsb); \
+		u64 new_msb = SXEVF_REG_READ(hw, msb); \
+		u64 latest = ((new_msb << 32) | new_lsb); \
+		cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+		last = latest; \
+	}
+
+#else
+/* Kernel variant: counters keep a running 64-bit value, bumping the
+ * high bits when the 32-bit (or 36-bit) hardware counter wraps.
+ */
+#define SXEVF_32BIT_COUNTER_UPDATE(reg, last_counter, counter) \
+	{ \
+		u32 current_counter = SXEVF_REG_READ(hw, reg); \
+		if (current_counter < last_counter) \
+			counter += 0x100000000LL; \
+		last_counter = current_counter; \
+		counter &= 0xFFFFFFFF00000000LL; \
+		counter |= current_counter; \
+	}
+
+#define SXEVF_36BIT_COUNTER_UPDATE(reg_lsb, reg_msb, last_counter, counter) \
+	{ \
+		u64 current_counter_lsb = SXEVF_REG_READ(hw, reg_lsb); \
+		u64 current_counter_msb = SXEVF_REG_READ(hw, reg_msb); \
+		u64 current_counter = (current_counter_msb << 32) | \
+			current_counter_lsb; \
+		if (current_counter < last_counter) \
+			counter += 0x1000000000LL; \
+		last_counter = current_counter; \
+		counter &= 0xFFFFFFF000000000LL; \
+		counter |= current_counter; \
+	}
+#endif
+
+/* Refresh the VF packet/byte counters (good Rx/Tx packets, Rx/Tx
+ * octets, multicast Rx) using the wrap-aware update macros above.
+ */
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPRC, stats->last_vfgprc,
+				stats->vfgprc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPTC, stats->last_vfgptc,
+				stats->vfgptc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGORC_LSB, SXEVF_VFGORC_MSB,
+				stats->last_vfgorc,
+				stats->vfgorc);
+	SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGOTC_LSB, SXEVF_VFGOTC_MSB,
+				stats->last_vfgotc,
+				stats->vfgotc);
+	SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFMPRC, stats->last_vfmprc,
+				stats->vfmprc);
+
+}
+
+/* Snapshot the hardware counters as the baseline ("last") values so
+ * later deltas start from the current state.
+ */
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+				struct sxevf_hw_stats *stats)
+{
+	stats->last_vfgprc = SXEVF_REG_READ(hw, SXE_VFGPRC);
+	stats->last_vfgorc = SXEVF_REG_READ(hw, SXE_VFGORC_LSB);
+	stats->last_vfgorc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGORC_MSB))) << 32);
+	stats->last_vfgptc = SXEVF_REG_READ(hw, SXE_VFGPTC);
+	stats->last_vfgotc = SXEVF_REG_READ(hw, SXE_VFGOTC_LSB);
+	stats->last_vfgotc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32);
+	stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC);
+
+}
+/* Statistics operation table wired into hw->stat.ops. */
+static const struct sxevf_stat_operations sxevf_stat_ops = {
+	.packet_stats_get = sxevf_packet_stats_get,
+	.stats_init_value_get = sxevf_stats_init_value_get,
+};
+
+/* Tell hardware how many Rx queues are in use: bit 29 of VFPSRTYPE
+ * selects multi-queue mode when more than one ring is active.
+ */
+static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring)
+{
+	u32 rqpl = 0;
+
+	if (max_rx_ring > 1)
+		rqpl |= BIT(29);
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, rqpl);
+
+}
+
+/* Data-buffer-unit operation table wired into hw->dbu.ops. */
+static const struct sxevf_dbu_operations sxevf_dbu_ops = {
+	.rx_max_used_ring_set = sxevf_rx_max_used_ring_set,
+};
+
+/* VF-to-PF mailbox hooks installed into hw->mbx.ops by sxevf_hw_ops_init(). */
+static const struct sxevf_mbx_operations sxevf_mbx_ops = {
+
+	.mailbox_read = sxevf_mailbox_read,
+	.mailbox_write = sxevf_mailbox_write,
+
+	.msg_write = sxevf_msg_write,
+	.msg_read = sxevf_msg_read,
+
+	.pf_req_irq_trigger = sxevf_pf_req_irq_trigger,
+	.pf_ack_irq_trigger = sxevf_pf_ack_irq_trigger,
+};
+
+/* Install all per-module operation tables on @hw.
+ *
+ * Must run before any code dereferences hw->*.ops.  The setup, irq and
+ * dma tables are defined earlier in this file (outside this hunk).
+ */
+void sxevf_hw_ops_init(struct sxevf_hw *hw)
+{
+	hw->setup.ops = &sxevf_setup_ops;
+	hw->irq.ops = &sxevf_irq_ops;
+	hw->mbx.ops = &sxevf_mbx_ops;
+	hw->dma.ops = &sxevf_dma_ops;
+	hw->stat.ops = &sxevf_stat_ops;
+	hw->dbu.ops = &sxevf_dbu_ops;
+
+}
+
+#ifdef SXE_DPDK
+
+#define SXEVF_RSS_FIELD_MASK 0xffff0000
+#define SXEVF_MRQC_RSSEN 0x00000001
+
+#define SXEVF_RSS_KEY_SIZE (40)
+#define SXEVF_MAX_RSS_KEY_ENTRIES (10)
+#define SXEVF_MAX_RETA_ENTRIES (128)
+
+/* Reset the VF RX/TX ring registers to a known default state.
+ *
+ * Zeroes head/tail/control and TX write-back address registers for each
+ * ring and programs SRRCTL with default header (0x100 >> 6 = 4, i.e.
+ * 256B units) and packet buffer (0x800 >> 10 = 2KB units) sizes.
+ *
+ * NOTE(review): the loop bound 7 is a magic number and does not match
+ * SXEVF_TXRX_RING_NUM_MAX (8) from sxevf_hw.h -- confirm whether the
+ * last ring is intentionally skipped or this is an off-by-one.
+ */
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw)
+{
+	int i;
+	u32 vfsrrctl;
+
+	vfsrrctl = 0x100 << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> SXEVF_SRRCTL_BSIZEPKT_SHIFT;
+
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, 0);
+
+	for (i = 0; i < 7; i++) {
+		SXEVF_REG_WRITE(hw, SXE_VFRDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(i), vfsrrctl);
+		SXEVF_REG_WRITE(hw, SXE_VFTDH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDT(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAH(i), 0);
+		SXEVF_REG_WRITE(hw, SXE_VFTDWBAL(i), 0);
+	}
+
+	SXEVF_WRITE_FLUSH(hw);
+
+}
+
+/* Return the raw VF extended interrupt cause register (VFEICR). */
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw)
+{
+	return SXEVF_REG_READ(hw, SXE_VFEICR);
+}
+
+/* Program the TX descriptor ring for queue @reg_idx.
+ *
+ * Writes the ring's DMA base address (split into low/high 32 bits),
+ * its length in bytes, and resets head and tail to 0.
+ */
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+			u64 desc_dma_addr, u8 reg_idx)
+{
+
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr &
+		DMA_BIT_MASK(32)));
+	SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32));
+	SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len);
+	SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0);
+	SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0);
+
+}
+
+/* Write @value (caller-encoded RSS queue-count bits) into VFPSRTYPE. */
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value);
+
+}
+
+/* Enable/disable hardware VLAN tag stripping on RX queue @reg_index.
+ *
+ * Read-modify-write of the per-queue VFRXDCTL register, toggling the
+ * VME (VLAN mode enable) bit.
+ */
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+			u16 reg_index, bool is_enable)
+{
+	u32 vlnctrl;
+
+	vlnctrl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index));
+
+	if (is_enable)
+		vlnctrl |= SXEVF_RXDCTL_VME;
+	else
+		vlnctrl &= ~SXEVF_RXDCTL_VME;
+
+	SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), vlnctrl);
+
+}
+
+/* Merge the TX descriptor prefetch/host/write-back thresholds into the
+ * queue's TXDCTL register.
+ *
+ * Each threshold field is 7 bits wide (SXEVF_TXDCTL_THRESH_MASK); the
+ * host and write-back fields sit at the H/WTHRESH shifts defined in
+ * sxevf_hw.h.  Existing TXDCTL bits are preserved (read-modify-write).
+ */
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+			u32 prefech_thresh, u32 host_thresh, u32 wb_thresh)
+{
+	u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx));
+
+	txdctl |= (prefech_thresh & SXEVF_TXDCTL_THRESH_MASK);
+	txdctl |= ((host_thresh & SXEVF_TXDCTL_THRESH_MASK) << SXEVF_TXDCTL_HTHRESH_SHIFT);
+	txdctl |= ((wb_thresh & SXEVF_TXDCTL_THRESH_MASK) << SXEVF_TXDCTL_WTHRESH_SHIFT);
+
+	SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl);
+}
+
+/* Bump the RX descriptor tail pointer of queue @reg_idx to @value. */
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value);
+
+}
+
+/* Read the RSS redirection table word holding entry @reg_idx.
+ * Each 32-bit VFRETA register packs four table entries, hence >> 2.
+ */
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx)
+{
+	return SXEVF_REG_READ(hw, SXE_VFRETA(reg_idx >> 2));
+}
+
+/* Write the RSS redirection table word holding entry @reg_idx.
+ * Each 32-bit VFRETA register packs four table entries (>> 2); the
+ * caller supplies the full packed 32-bit @value.
+ */
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+			u16 reg_idx, u32 value)
+{
+	SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value);
+}
+
+/* Read one 32-bit word of the RSS hash key.
+ *
+ * Returns 0 for out-of-range indices (>= SXEVF_MAX_RSS_KEY_ENTRIES)
+ * instead of touching an undefined register.
+ */
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx)
+{
+	u32 rss_key;
+
+	if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES)
+		rss_key = 0;
+	else
+		rss_key = SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx));
+
+	return rss_key;
+}
+
+/* Return the RSS hash-field selection bits (upper 16 bits of VFMRQC). */
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+	return (mrqc & SXEVF_RSS_FIELD_MASK);
+}
+
+/* Report whether RSS is currently enabled in hardware.
+ *
+ * RSS is on when the RSSEN bit of the VFMRQC register is set.
+ */
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	return (mrqc & SXEVF_MRQC_RSSEN) != 0;
+}
+
+/* Program the full RSS hash key.
+ *
+ * @rss_key must point to SXEVF_MAX_RSS_KEY_ENTRIES (10) 32-bit words,
+ * i.e. the 40-byte key (SXEVF_RSS_KEY_SIZE).
+ */
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key)
+{
+	u32 i;
+
+	for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++)
+		SXEVF_REG_WRITE(hw, SXE_VFRSSRK(i), rss_key[i]);
+
+}
+
+/* Enable or disable RSS by toggling the RSSEN bit of VFMRQC
+ * (read-modify-write, other MRQC bits preserved).
+ */
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+	if (is_on)
+		mrqc |= SXEVF_MRQC_RSSEN;
+	else
+		mrqc &= ~SXEVF_MRQC_RSSEN;
+
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+
+}
+
+/* Replace the RSS hash-field selection bits (upper 16 bits of VFMRQC)
+ * with @rss_field, preserving the remaining MRQC bits (incl. RSSEN).
+ */
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field)
+{
+	u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC);
+
+	mrqc &= ~SXEVF_RSS_FIELD_MASK;
+	mrqc |= rss_field;
+	SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc);
+
+}
+
+/* Dump a group of register ranges into @reg_buf.
+ *
+ * @regs is an array terminated by an entry whose .count is 0; each
+ * entry describes .count registers starting at .addr with the given
+ * .stride.  Returns the total number of 32-bit words written.
+ *
+ * Fixes vs. v1: drop the stray ';' after the function body (not valid
+ * ISO C at file scope) and make the accumulator u32 to match the
+ * return type instead of a signed int.
+ */
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+			const struct sxevf_reg_info *regs,
+			u32 *reg_buf)
+{
+	u32 j, i = 0;
+	u32 count = 0;
+
+	while (regs[i].count) {
+		for (j = 0; j < regs[i].count; j++) {
+			reg_buf[count + j] = SXEVF_REG_READ(hw,
+				regs[i].addr + j * regs[i].stride);
+			LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n",
+				regs[i].name, regs[i].addr, reg_buf[count + j]);
+		}
+
+		i++;
+		count += j;
+	}
+
+	return count;
+}
+
+#endif
new file mode 100644
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_HW_H__
+#define __SXEVF_HW_H__
+
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#else
+#include "sxe_compat_platform.h"
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+#endif
+
+#include "sxevf_regs.h"
+
+#if defined(__KERNEL__) || defined(SXE_KERNEL_TEST)
+#define SXE_PRIU64 "llu"
+#define SXE_PRIX64 "llx"
+#define SXE_PRID64 "lld"
+#else
+#define SXE_PRIU64 PRIu64
+#define SXE_PRIX64 PRIx64
+#define SXE_PRID64 PRId64
+#endif
+
+#define SXEVF_TXRX_RING_NUM_MAX 8
+#define SXEVF_MAX_TXRX_DESC_POLL (10)
+#define SXEVF_TX_DESC_PREFETCH_THRESH_32 (32)
+#define SXEVF_TX_DESC_HOST_THRESH_1 (1)
+#define SXEVF_TX_DESC_WRITEBACK_THRESH_8 (8)
+#define SXEVF_TXDCTL_HTHRESH_SHIFT (8)
+#define SXEVF_TXDCTL_WTHRESH_SHIFT (16)
+
+#define SXEVF_TXDCTL_THRESH_MASK (0x7F)
+
+#define SXEVF_RX_RING_POLL_MAX (10)
+
+#define SXEVF_MAC_HDR_LEN_MAX (127)
+#define SXEVF_NETWORK_HDR_LEN_MAX (511)
+
+#define SXEVF_LINK_SPEED_UNKNOWN 0
+#define SXEVF_LINK_SPEED_1GB_FULL 0x0020
+#define SXEVF_LINK_SPEED_10GB_FULL 0x0080
+#define SXEVF_LINK_SPEED_100_FULL 0x0008
+
+#define SXEVF_VFT_TBL_SIZE (128)
+#define SXEVF_HW_TXRX_RING_NUM_MAX (128)
+
+#define SXEVF_VLAN_TAG_SIZE (4)
+
+#define SXEVF_HW_UC_ENTRY_NUM_MAX 128
+
+enum {
+ SXEVF_LINK_TO_PHY = 0,
+ SXEVF_LINK_TO_DOWN,
+ SXEVF_LINK_TO_REINIT,
+};
+
+enum {
+ SXEVF_DIAG_TEST_PASSED = 0,
+ SXEVF_DIAG_TEST_BLOCKED = 1,
+ SXEVF_DIAG_REG_PATTERN_TEST_ERR = 2,
+ SXEVF_DIAG_CHECK_REG_TEST_ERR = 3,
+};
+
+struct sxevf_hw;
+
+/* VF statistics state: running 64-bit totals plus the raw-register
+ * snapshots needed to handle hardware counter wrap-around.
+ */
+struct sxevf_hw_stats {
+	/* Baselines captured when stats were (re)based. */
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	/* Last raw register readings (wrap detection baseline). */
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	/* Accumulated totals: good pkts/octets rx/tx, multicast rx. */
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+
+	/* Values preserved across a hardware reset. */
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+void sxevf_hw_ops_init(struct sxevf_hw *hw);
+
+
+struct sxevf_setup_operations {
+ void (*reset)(struct sxevf_hw *hw);
+ void (*hw_stop)(struct sxevf_hw *hw);
+ s32 (*regs_test)(struct sxevf_hw *hw);
+ u32 (*link_state_get)(struct sxevf_hw *hw);
+ u32 (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size);
+ bool (*reset_done)(struct sxevf_hw *hw);
+};
+
+struct sxevf_hw_setup {
+ const struct sxevf_setup_operations *ops;
+};
+
+struct sxevf_irq_operations {
+ void (*pending_irq_clear)(struct sxevf_hw *hw);
+ void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+ void (*event_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 value);
+ void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 irq_idx);
+ void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx);
+ void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics);
+ void (*irq_enable)(struct sxevf_hw *hw, u32 mask);
+ void (*specific_irq_enable)(struct sxevf_hw *hw, u32 value);
+ void (*irq_disable)(struct sxevf_hw *hw);
+ void (*irq_off)(struct sxevf_hw *hw);
+};
+
+struct sxevf_irq_info {
+ const struct sxevf_irq_operations *ops;
+};
+
+struct sxevf_mbx_operations {
+
+ u32 (*mailbox_read)(struct sxevf_hw *hw);
+ void (*mailbox_write)(struct sxevf_hw *hw, u32 value);
+
+ void (*msg_write)(struct sxevf_hw *hw, u8 index, u32 msg);
+ u32 (*msg_read)(struct sxevf_hw *hw, u8 index);
+
+ void (*pf_req_irq_trigger)(struct sxevf_hw *hw);
+ void (*pf_ack_irq_trigger)(struct sxevf_hw *hw);
+};
+
+struct sxevf_mbx_stats {
+ u32 send_msgs;
+ u32 rcv_msgs;
+
+ u32 reqs;
+ u32 acks;
+ u32 rsts;
+};
+
+struct sxevf_mbx_info {
+ const struct sxevf_mbx_operations *ops;
+
+ struct sxevf_mbx_stats stats;
+ u32 msg_len;
+ u32 retry;
+ u32 interval;
+ u32 reg_value;
+ u32 api_version;
+};
+
+struct sxevf_dma_operations {
+ void (*tx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+ u64 desc_dma_addr, u8 reg_idx);
+ void (*tx_writeback_off)(struct sxevf_hw *hw, u8 reg_idx);
+ void (*tx_desc_thresh_set)(struct sxevf_hw *hw, u8 reg_idx,
+ u32 wb_thresh, u32 host_thresh, u32 prefech_thresh);
+ void (*tx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+ void (*tx_desc_wb_flush)(struct sxevf_hw *hw, u8 val);
+ void (*tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx,
+ u32 *head, u32 *tail);
+ void (*rx_disable)(struct sxevf_hw *hw, u8 reg_idx);
+ void (*rx_ring_switch)(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+ void (*rx_ring_desc_configure)(struct sxevf_hw *hw, u32 desc_mem_len,
+ u64 desc_dma_addr, u8 reg_idx);
+ void (*rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx,
+ u32 header_buf_len, u32 pkg_buf_len, bool drop_en);
+};
+
+struct sxevf_dma_info {
+ const struct sxevf_dma_operations *ops;
+};
+
+struct sxevf_stat_operations {
+ void (*packet_stats_get)(struct sxevf_hw *hw,
+ struct sxevf_hw_stats *stats);
+ void (*stats_init_value_get)(struct sxevf_hw *hw,
+ struct sxevf_hw_stats *stats);
+};
+
+struct sxevf_stat_info {
+ const struct sxevf_stat_operations *ops;
+};
+
+struct sxevf_dbu_operations {
+ void (*rx_max_used_ring_set)(struct sxevf_hw *hw, u16 max_rx_ring);
+
+};
+
+struct sxevf_dbu_info {
+ const struct sxevf_dbu_operations *ops;
+};
+
+enum sxevf_hw_state {
+ SXEVF_HW_STOP,
+ SXEVF_HW_FAULT,
+};
+
+/* Per-device VF hardware context: mapped register base, fault state,
+ * MMIO accessor hooks and the per-module operation tables installed by
+ * sxevf_hw_ops_init().
+ */
+struct sxevf_hw {
+	u8 __iomem *reg_base_addr;	/* mapped BAR register base */
+	void *adapter;			/* opaque owner handle */
+
+	void *priv;			/* argument handed to fault_handle */
+	unsigned long state;		/* bits from enum sxevf_hw_state */
+	void (*fault_handle)(void *priv);	/* invoked on hw fault */
+	u32 (*reg_read)(const volatile void *reg);	/* MMIO read hook */
+	void (*reg_write)(u32 value, volatile void *reg);	/* MMIO write hook */
+	s32 board_type;
+
+	struct sxevf_hw_setup setup;
+	struct sxevf_irq_info irq;
+	struct sxevf_mbx_info mbx;
+
+	struct sxevf_dma_info dma;
+	struct sxevf_stat_info stat;
+	struct sxevf_dbu_info dbu;
+};
+
+struct sxevf_reg_info {
+ u32 addr;
+ u32 count;
+ u32 stride;
+ const s8 *name;
+};
+
+u16 sxevf_reg_dump_num_get(void);
+
+void sxevf_hw_fault_handle(struct sxevf_hw *hw);
+
+/* Return true when the SXEVF_HW_FAULT bit is set in hw->state. */
+static inline bool sxevf_is_hw_fault(struct sxevf_hw *hw)
+{
+	return test_bit(SXEVF_HW_FAULT, &hw->state);
+}
+
+/* Register the fault callback @handle and its context @priv on @hw;
+ * sxevf_hw_fault_handle() uses these when a hardware fault is raised.
+ */
+static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw,
+			void (*handle)(void *), void *priv)
+{
+	hw->priv = priv;
+	hw->fault_handle = handle;
+
+}
+
+/* Install the platform MMIO accessors used by SXEVF_REG_READ/WRITE. */
+static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw,
+			u32 (*read)(const volatile void *),
+			void (*write)(u32, volatile void *))
+{
+	hw->reg_read = read;
+	hw->reg_write = write;
+
+}
+
+#ifdef SXE_DPDK
+
+void sxevf_irq_disable(struct sxevf_hw *hw);
+
+void sxevf_hw_stop(struct sxevf_hw *hw);
+
+void sxevf_hw_reset(struct sxevf_hw *hw);
+
+void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg);
+
+u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index);
+
+u32 sxevf_mailbox_read(struct sxevf_hw *hw);
+
+void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value);
+
+void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw);
+
+void sxevf_rxtx_reg_init(struct sxevf_hw *hw);
+
+void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask);
+
+u32 sxevf_irq_cause_get(struct sxevf_hw *hw);
+
+void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector);
+
+void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector);
+
+void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval);
+
+void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+ u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len,
+ u64 desc_dma_addr, u8 reg_idx);
+
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx,
+ u32 header_buf_len, u32 pkg_buf_len,
+ bool drop_en);
+
+void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value);
+
+void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw,
+ u16 reg_index, bool is_enable);
+
+void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx,
+ u32 prefech_thresh, u32 host_thresh, u32 wb_thresh);
+
+void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on);
+
+void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value);
+
+void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value);
+
+void sxevf_packet_stats_get(struct sxevf_hw *hw,
+ struct sxevf_hw_stats *stats);
+
+void sxevf_stats_init_value_get(struct sxevf_hw *hw,
+ struct sxevf_hw_stats *stats);
+
+u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx);
+
+void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw,
+ u16 reg_idx, u32 value);
+
+u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx);
+
+u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw);
+
+void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field);
+
+void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on);
+
+void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key);
+
+bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw);
+
+u32 sxevf_link_state_get(struct sxevf_hw *hw);
+
+u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw,
+ const struct sxevf_reg_info *regs,
+ u32 *reg_buf);
+
+#endif
+#endif
new file mode 100644
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXEVF_REGS_H__
+#define __SXEVF_REGS_H__
+
+#define SXEVF_REG_READ_FAIL 0xffffffffU
+#define SXEVF_REG_READ_RETRY 5
+
+#define SXE_VFLINKS_UP 0x00000008
+#define SXE_VFLINKS_SPEED 0x00000006
+#define SXE_VFLINKS_SPEED_10G 0x00000006
+#define SXE_VFLINKS_SPEED_1G 0x00000004
+#define SXE_VFLINKS_SPEED_100 0x00000002
+
+#define SXE_VFCTRL 0x00000
+#define SXE_VFSTATUS 0x00008
+#define SXE_VFLINKS 0x00018
+#define SXE_VFFRTIMER 0x00048
+#define SXE_VFRXMEMWRAP 0x03190
+#define SXE_VFEICR 0x00100
+#define SXE_VFEICS 0x00104
+#define SXE_VFEIMS 0x00108
+#define SXE_VFEIMC 0x0010C
+#define SXE_VFEIAM 0x00114
+#define SXE_VFEITR(x) (0x00820 + (4 * (x)))
+#define SXE_VFIVAR(x) (0x00120 + (4 * (x)))
+#define SXE_VFIVAR_MISC 0x00140
+#define SXE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define SXE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define SXE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define SXE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define SXE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define SXE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define SXE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define SXE_VFLROCTL(x) (0x0102C + (0x40 * (x)))
+#define SXE_VFPSRTYPE 0x00300
+#define SXE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define SXE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define SXE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define SXE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define SXE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define SXE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define SXE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define SXE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define SXE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define SXE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
+#define SXE_VFGPRC 0x0101C
+#define SXE_VFGPTC 0x0201C
+#define SXE_VFGORC_LSB 0x01020
+#define SXE_VFGORC_MSB 0x01024
+#define SXE_VFGOTC_LSB 0x02020
+#define SXE_VFGOTC_MSB 0x02024
+#define SXE_VFMPRC 0x01034
+#define SXE_VFMRQC 0x3000
+#define SXE_VFRSSRK(x) (0x3100 + ((x) * 4))
+#define SXE_VFRETA(x) (0x3200 + ((x) * 4))
+
+#define SXEVF_VFEIMC_IRQ_MASK (7)
+#define SXEVF_IVAR_ALLOC_VALID (0x80)
+
+#define SXEVF_EITR_CNT_WDIS (0x80000000)
+#define SXEVF_EITR_ITR_MASK (0x00000FF8)
+#define SXEVF_EITR_ITR_SHIFT (2)
+#define SXEVF_EITR_ITR_MAX (SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT)
+
+#define SXE_VFRXDCTL_ENABLE 0x02000000
+#define SXE_VFTXDCTL_ENABLE 0x02000000
+#define SXE_VFCTRL_RST 0x04000000
+
+#define SXEVF_RXDCTL_ENABLE 0x02000000
+#define SXEVF_RXDCTL_VME 0x40000000
+
+#define SXEVF_PSRTYPE_RQPL_SHIFT 29
+
+#define SXEVF_SRRCTL_DROP_EN 0x10000000
+#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF 0x02000000
+#define SXEVF_SRRCTL_BSIZEPKT_SHIFT (10)
+#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT (2)
+#define SXEVF_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define SXEVF_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define SXE_VFMAILBOX 0x002FC
+#define SXE_VFMBMEM 0x00200
+
+#define SXE_VFMAILBOX_REQ 0x00000001
+#define SXE_VFMAILBOX_ACK 0x00000002
+#define SXE_VFMAILBOX_VFU 0x00000004
+#define SXE_VFMAILBOX_PFU 0x00000008
+#define SXE_VFMAILBOX_PFSTS 0x00000010
+#define SXE_VFMAILBOX_PFACK 0x00000020
+#define SXE_VFMAILBOX_RSTI 0x00000040
+#define SXE_VFMAILBOX_RSTD 0x00000080
+#define SXE_VFMAILBOX_RC_BIT 0x000000B0
+
+#define SXEVF_TDBAL(_i) (0x02000 + ((_i) * 0x40))
+#define SXEVF_TDBAH(_i) (0x02004 + ((_i) * 0x40))
+#define SXEVF_TDLEN(_i) (0x02008 + ((_i) * 0x40))
+#define SXEVF_TDH(_i) (0x02010 + ((_i) * 0x40))
+#define SXEVF_TDT(_i) (0x02018 + ((_i) * 0x40))
+#define SXEVF_TXDCTL(_i) (0x02028 + ((_i) * 0x40))
+#define SXEVF_TDWBAL(_i) (0x02038 + ((_i) * 0x40))
+#define SXEVF_TDWBAH(_i) (0x0203C + ((_i) * 0x40))
+
+#define SXEVF_TXDCTL_SWFLSH (0x02000000)
+#define SXEVF_TXDCTL_ENABLE (0x02000000)
+
+#define SXEVF_VFGPRC 0x0101C
+#define SXEVF_VFGPTC 0x0201C
+#define SXEVF_VFGORC_LSB 0x01020
+#define SXEVF_VFGORC_MSB 0x01024
+#define SXEVF_VFGOTC_LSB 0x02020
+#define SXEVF_VFGOTC_MSB 0x02024
+#define SXEVF_VFMPRC 0x01034
+
+#define SXEVF_EICR_MASK 0x07
+
+#endif
new file mode 100644
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __DRV_MSG_H__
+#define __DRV_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERSION_LEN 32
+
+
+
+
+
+typedef struct sxe_version_resp {
+ U8 fw_version[SXE_VERSION_LEN];
+} sxe_version_resp_s;
+
+#endif
new file mode 100644
new file mode 100644
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_PORT_H__
+#define __SXE_PORT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "mgc_types.h"
+#include "ps3_types.h"
+
+typedef enum MglPortCmdSetCode {
+ MGL_CMD_PORT_SET_BASE = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0),
+ MGL_CMD_PORT_SET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1),
+ MGL_CMD_PORT_SET_LED = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2),
+ MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3),
+ MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4),
+ MGL_CMD_SXE_SOC_RST = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5),
+ MGL_CMD_SXE_SET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6),
+ MGL_CMD_SXE_SET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7),
+ MGL_CMD_SXE_OPT_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8),
+ MGL_CMD_SXE_SET_LLDPSTATE = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 9),
+} MglPortCmdSetCode_e;
+
+typedef enum MglPortCmdGetCode {
+ MGL_CMD_SXE_GET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0),
+ MGL_CMD_SXE_GET_SOC_INFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1),
+ MGL_CMD_SXE_LOG_EXPORT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2),
+ MGL_CMD_SXE_REGS_DUMP = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3),
+ MGL_CMD_SXE_GET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4),
+ MGL_CMD_SXE_MAC_ADDR_GET = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5),
+ MGL_CMD_SXE_GET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6),
+} MglPortCmdGetCode_e;
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_CLI_H__
+#define __SXE_CLI_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_VERION_LEN (32)
+#define SXE_MAC_NUM (128)
+#define SXE_PORT_TRANSCEIVER_LEN (32)
+#define SXE_PORT_VENDOR_LEN (32)
+#define SXE_CHIP_TYPE_LEN (32)
+#define SXE_VPD_SN_LEN (16)
+#define SXE_SOC_RST_TIME (0x93A80)
+#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3)
+#define MGC_TERMLOG_INFO_MAX_LEN (12 * 1024)
+#define SXE_REGS_DUMP_MAX_LEN (12 * 1024)
+#define SXE_PRODUCT_NAME_LEN (32)
+
+typedef enum sxe_led_mode {
+ SXE_IDENTIFY_LED_BLINK_ON = 0,
+ SXE_IDENTIFY_LED_BLINK_OFF,
+ SXE_IDENTIFY_LED_ON,
+ SXE_IDENTIFY_LED_OFF,
+ SXE_IDENTIFY_LED_RESET,
+} sxe_led_mode_s;
+
+typedef struct sxe_led_ctrl {
+ U32 mode;
+ U32 duration;
+
+} sxe_led_ctrl_s;
+
+typedef struct sxe_led_ctrl_resp {
+ U32 ack;
+} sxe_led_ctrl_resp_s;
+
+typedef enum PortLinkSpeed {
+ PORT_LINK_NO = 0,
+ PORT_LINK_100M = 1,
+ PORT_LINK_1G = 2,
+ PORT_LINK_10G = 3,
+} PortLinkSpeed_e;
+
+typedef struct SysSocInfo {
+ S8 fwVer[SXE_VERION_LEN];
+ S8 optVer[SXE_VERION_LEN];
+ U8 socStatus;
+ U8 pad[3];
+ S32 socTemp;
+ U64 chipId;
+ S8 chipType[SXE_CHIP_TYPE_LEN];
+ S8 pba[SXE_VPD_SN_LEN];
+ S8 productName[SXE_PRODUCT_NAME_LEN];
+} SysSocInfo_s;
+
+typedef struct SysPortInfo {
+ U64 mac[SXE_MAC_NUM];
+ U8 isPortAbs;
+ U8 linkStat;
+ U8 linkSpeed;
+
+
+ U8 isSfp:1;
+ U8 isGetInfo:1;
+ U8 rvd:6;
+ S8 opticalModTemp;
+ U8 pad[3];
+ S8 transceiverType[SXE_PORT_TRANSCEIVER_LEN];
+ S8 vendorName[SXE_PORT_VENDOR_LEN];
+ S8 vendorPn[SXE_PORT_VENDOR_LEN];
+} SysPortInfo_s;
+
+typedef struct SysInfoResp {
+ SysSocInfo_s socInfo;
+ SysPortInfo_s portInfo;
+} SysInfoResp_s;
+
+typedef enum SfpTempTdMode {
+ SFP_TEMP_THRESHOLD_MODE_ALARM = 0,
+ SFP_TEMP_THRESHOLD_MODE_WARN,
+} SfpTempTdMode_e;
+
+typedef struct SfpTempTdSet {
+ U8 mode;
+ U8 pad[3];
+ S8 hthreshold;
+ S8 lthreshold;
+} SfpTempTdSet_s;
+
+typedef struct SxeLogExportResp {
+ U16 curLogLen;
+ U8 isEnd;
+ U8 pad;
+ S32 sessionId;
+ S8 data[0];
+} SxeLogExportResp_s;
+
+typedef enum SxeLogExportType {
+ SXE_LOG_EXPORT_REQ = 0,
+ SXE_LOG_EXPORT_FIN,
+ SXE_LOG_EXPORT_ABORT,
+} SxeLogExportType_e;
+
+typedef struct SxeLogExportReq {
+ U8 isALLlog;
+ U8 cmdtype;
+ U8 isBegin;
+ U8 pad;
+ S32 sessionId;
+ U32 logLen;
+} SxeLogExportReq_s;
+
+typedef struct SocRstReq {
+ U32 time;
+} SocRstReq_s;
+
+typedef struct RegsDumpResp {
+ U32 curdwLen;
+ U8 data[0];
+} RegsDumpResp_s;
+
+enum {
+ SXE_MFG_PART_NUMBER_LEN = 8,
+ SXE_MFG_SERIAL_NUMBER_LEN = 16,
+ SXE_MFG_REVISION_LEN = 4,
+ SXE_MFG_OEM_STR_LEN = 64,
+ SXE_MFG_SXE_BOARD_ASSEMBLY_LEN = 32,
+ SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16,
+ SXE_MFG_SXE_MAC_ADDR_CNT = 2,
+};
+
+typedef struct sxeMfgInfo {
+ U8 partNumber[SXE_MFG_PART_NUMBER_LEN];
+ U8 serialNumber[SXE_MFG_SERIAL_NUMBER_LEN];
+ U32 mfgDate;
+ U8 revision[SXE_MFG_REVISION_LEN];
+ U32 reworkDate;
+ U8 pad[4];
+ U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT];
+ U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN];
+ U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN];
+ U8 extra1[SXE_MFG_OEM_STR_LEN];
+ U8 extra2[SXE_MFG_OEM_STR_LEN];
+} sxeMfgInfo_t;
+
+typedef struct SxeLldpInfo {
+ U8 lldpState;
+ U8 pad[3];
+} SxeLldpInfo_t;
+
+typedef struct RegsDumpReq {
+ U32 baseAddr;
+ U32 dwLen;
+} RegsDumpReq_s;
+
+typedef enum sxe_pcs_mode {
+ SXE_PCS_MODE_1000BASE_KX_WO = 0,
+ SXE_PCS_MODE_1000BASE_KX_W,
+ SXE_PCS_MODE_SGMII,
+ SXE_PCS_MODE_10GBASE_KR_WO,
+ SXE_PCS_MODE_AUTO_NEGT_73,
+ SXE_PCS_MODE_LPBK_PHY_TX2RX,
+ SXE_PCS_MODE_LPBK_PHY_RX2TX,
+ SXE_PCS_MODE_LPBK_PCS_RX2TX,
+ SXE_PCS_MODE_BUTT,
+} sxe_pcs_mode_e;
+
+typedef enum sxe_remote_fault_mode {
+ SXE_REMOTE_FALUT_NO_ERROR = 0,
+ SXE_REMOTE_FALUT_OFFLINE,
+ SXE_REMOTE_FALUT_LINK_FAILURE,
+ SXE_REMOTE_FALUT_AUTO_NEGOTIATION,
+ SXE_REMOTE_UNKNOWN,
+} sxe_remote_fault_e;
+
+typedef struct sxe_phy_cfg {
+ sxe_pcs_mode_e mode;
+ U32 mtu;
+} sxe_pcs_cfg_s;
+
+typedef enum sxe_an_speed {
+ SXE_AN_SPEED_NO_LINK = 0,
+ SXE_AN_SPEED_100M,
+ SXE_AN_SPEED_1G,
+ SXE_AN_SPEED_10G,
+ SXE_AN_SPEED_UNKNOWN,
+} sxe_an_speed_e;
+
+typedef enum sxe_phy_pause_cap {
+ SXE_PAUSE_CAP_NO_PAUSE = 0,
+ SXE_PAUSE_CAP_ASYMMETRIC_PAUSE,
+ SXE_PAUSE_CAP_SYMMETRIC_PAUSE,
+ SXE_PAUSE_CAP_BOTH_PAUSE,
+ SXE_PAUSE_CAP_UNKNOWN,
+} sxe_phy_pause_cap_e;
+
+typedef enum sxe_phy_duplex_type {
+ SXE_FULL_DUPLEX = 0,
+ SXE_HALF_DUPLEX = 1,
+ SXE_UNKNOWN_DUPLEX,
+} sxe_phy_duplex_type_e;
+
+typedef struct sxe_phy_an_cap {
+ sxe_remote_fault_e remote_fault;
+ sxe_phy_pause_cap_e pause_cap;
+ sxe_phy_duplex_type_e duplex_cap;
+} sxe_phy_an_cap_s;
+
+typedef struct sxe_an_cap {
+ sxe_phy_an_cap_s local;
+ sxe_phy_an_cap_s peer;
+} sxe_an_cap_s;
+#endif
new file mode 100644
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_HDC_H__
+#define __SXE_HDC_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define HDC_CACHE_TOTAL_LEN (16 *1024)
+#define ONE_PACKET_LEN_MAX (1024)
+#define DWORD_NUM (256)
+#define HDC_TRANS_RETRY_COUNT (3)
+
+
+typedef enum SxeHdcErrnoCode {
+ PKG_OK = 0,
+ PKG_ERR_REQ_LEN,
+ PKG_ERR_RESP_LEN,
+ PKG_ERR_PKG_SKIP,
+ PKG_ERR_NODATA,
+ PKG_ERR_PF_LK,
+ PKG_ERR_OTHER,
+} SxeHdcErrnoCode_e;
+
+/* Header word of one HDC (host-driver communication) transfer packet;
+ * dw0 gives raw access to the packed 32-bit header.
+ *
+ * NOTE(review): bit-field ordering within a storage unit is
+ * implementation-defined in C; presumably this matches the device's
+ * expected little-endian layout -- confirm against the firmware
+ * interface specification.
+ */
+typedef union HdcHeader {
+	struct {
+		U8 pid:4;		/* packet id within the transfer */
+		U8 errCode:4;		/* SxeHdcErrnoCode_e result */
+		U8 len;			/* payload length of this packet */
+		U16 startPkg:1;		/* first packet of the transfer */
+		U16 endPkg:1;		/* last packet of the transfer */
+		U16 isRd:1;		/* read (vs write) transaction */
+		U16 msi:1;
+		U16 totalLen:12;	/* total transfer length */
+	} head;
+	U32 dw0;
+} HdcHeader_u;
+
+#endif
+
new file mode 100644
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef _SXE_IOCTL_H_
+#define _SXE_IOCTL_H_
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+struct SxeIoctlSyncCmd {
+ U64 traceid;
+ void *inData;
+ U32 inLen;
+ void *outData;
+ U32 outLen;
+};
+
+#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd)
+
+#endif
new file mode 100644
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_MSG_H__
+#define __SXE_MSG_H__
+
+#ifdef SXE_HOST_DRIVER
+#include "sxe_drv_type.h"
+#endif
+
+#define SXE_MAC_ADDR_LEN 6
+
+#define SXE_HDC_CMD_HDR_SIZE sizeof(struct sxe_hdc_cmd_hdr)
+#define SXE_HDC_MSG_HDR_SIZE sizeof(struct sxe_hdc_drv_cmd_msg)
+
+enum sxe_cmd_type {
+ SXE_CMD_TYPE_CLI,
+ SXE_CMD_TYPE_DRV,
+ SXE_CMD_TYPE_UNKNOWN,
+};
+
+typedef struct sxe_hdc_cmd_hdr {
+ U8 cmd_type;
+ U8 cmd_sub_type;
+ U8 reserve[6];
+} sxe_hdc_cmd_hdr_s;
+
+
+
+typedef enum SxeFWState {
+ SXE_FW_START_STATE_UNDEFINED = 0x00,
+ SXE_FW_START_STATE_INIT_BASE = 0x10,
+ SXE_FW_START_STATE_SCAN_DEVICE = 0x20,
+ SXE_FW_START_STATE_FINISHED = 0x30,
+ SXE_FW_START_STATE_UPGRADE = 0x31,
+ SXE_FW_RUNNING_STATE_ABNOMAL = 0x40,
+ SXE_FW_START_STATE_MASK = 0xF0,
+} SxeFWState_e;
+
+typedef struct SxeFWStateInfo {
+ U8 socStatus;
+ char statBuff[32];
+} SxeFWStateInfo_s;
+
+
+typedef enum MsiEvt {
+ MSI_EVT_SOC_STATUS = 0x1,
+ MSI_EVT_HDC_FWOV = 0x2,
+ MSI_EVT_HDC_TIME_SYNC = 0x4,
+
+ MSI_EVT_MAX = 0x80000000,
+} MsiEvt_u;
+
+
+typedef enum SxeFwHdcState {
+ SXE_FW_HDC_TRANSACTION_IDLE = 0x01,
+ SXE_FW_HDC_TRANSACTION_BUSY,
+
+ SXE_FW_HDC_TRANSACTION_ERR,
+} SxeFwHdcState_e;
+
+enum sxe_hdc_cmd_opcode {
+ SXE_CMD_SET_WOL = 1,
+ SXE_CMD_LED_CTRL,
+ SXE_CMD_SFP_READ,
+ SXE_CMD_SFP_WRITE,
+ SXE_CMD_TX_DIS_CTRL = 5,
+ SXE_CMD_TINE_SYNC,
+ SXE_CMD_RATE_SELECT,
+ SXE_CMD_R0_MAC_GET,
+ SXE_CMD_LOG_EXPORT,
+ SXE_CMD_FW_VER_GET = 10,
+ SXE_CMD_PCS_SDS_INIT,
+ SXE_CMD_AN_SPEED_GET,
+ SXE_CMD_AN_CAP_GET,
+ SXE_CMD_GET_SOC_INFO,
+ SXE_CMD_MNG_RST = 15,
+
+ SXE_CMD_MAX,
+};
+
+enum sxe_hdc_cmd_errcode {
+ SXE_ERR_INVALID_PARAM = 1,
+};
+
+typedef struct sxe_hdc_drv_cmd_msg {
+
+ U16 opcode;
+ U16 errcode;
+ union dataLength {
+ U16 req_len;
+ U16 ack_len;
+ } length;
+ U8 reserve[8];
+ U64 traceid;
+ U8 body[0];
+} sxe_hdc_drv_cmd_msg_s;
+
+
+typedef struct sxe_sfp_rw_req {
+ U16 offset;
+ U16 len;
+ U8 write_data[0];
+} sxe_sfp_rw_req_s;
+
+
+typedef struct sxe_sfp_read_resp {
+ U16 len;
+ U8 resp[0];
+} sxe_sfp_read_resp_s;
+
+typedef enum sxe_sfp_rate {
+ SXE_SFP_RATE_1G = 0,
+ SXE_SFP_RATE_10G = 1,
+} sxe_sfp_rate_e;
+
+
+typedef struct sxe_sfp_rate_able {
+ sxe_sfp_rate_e rate;
+} sxe_sfp_rate_able_s;
+
+
+typedef struct sxe_spp_tx_able {
+ BOOL isDisable;
+} sxe_spp_tx_able_s;
+
+
+typedef struct sxe_default_mac_addr_resp {
+ U8 addr[SXE_MAC_ADDR_LEN];
+} sxe_default_mac_addr_resp_s;
+
+
+typedef struct sxe_mng_rst {
+ BOOL enable;
+} sxe_mng_rst_s;
+
+#endif
+
new file mode 100644
@@ -0,0 +1,1276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_REGS_H__
+#define __SXE_REGS_H__
+
+#define SXE_LINKSEC_MAX_SC_COUNT 1
+#define SXE_LINKSEC_MAX_SA_COUNT 2
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+
+
+#define SXE_REG_READ_FAIL 0xffffffffU
+#define SXE_REG_READ_RETRY 5
+#ifdef SXE_TEST
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT (1)
+#else
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT (800)
+#endif
+
+
+#define SXE_CTRL 0x00000
+#define SXE_STATUS 0x00008
+#define SXE_CTRL_EXT 0x00018
+
+
+#define SXE_CTRL_LNK_RST 0x00000008
+#define SXE_CTRL_RST 0x04000000
+
+#ifdef SXE_TEST
+#define SXE_CTRL_RST_MASK (0)
+#define SXE_CTRL_GIO_DIS (0)
+#else
+#define SXE_CTRL_RST_MASK (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+#define SXE_CTRL_GIO_DIS 0x00000004
+#endif
+
+
+#define SXE_STATUS_GIO 0x00080000
+
+
+#define SXE_CTRL_EXT_PFRSTD 0x00004000
+#define SXE_CTRL_EXT_NS_DIS 0x00010000
+#define SXE_CTRL_EXT_DRV_LOAD 0x10000000
+
+
+#define SXE_FCRTL(_i) (0x03220 + ((_i) * 4))
+#define SXE_FCRTH(_i) (0x03260 + ((_i) * 4))
+#define SXE_FCCFG 0x03D00
+
+
+#define SXE_FCRTL_XONE 0x80000000
+#define SXE_FCRTH_FCEN 0x80000000
+
+#define SXE_FCCFG_TFCE_802_3X 0x00000008
+#define SXE_FCCFG_TFCE_PRIORITY 0x00000010
+
+
+#define SXE_GCR_EXT 0x11050
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define SXE_GCR_CAP_VER2 0x00040000
+#define SXE_GCR_EXT_MSIX_EN 0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define SXE_GCR_EXT_VT_MODE_16 0x00000001
+#define SXE_GCR_EXT_VT_MODE_32 0x00000002
+#define SXE_GCR_EXT_VT_MODE_64 0x00000003
+#define SXE_GCR_EXT_VT_MODE_MASK 0x00000003
+#define SXE_GCR_EXT_SRIOV (SXE_GCR_EXT_MSIX_EN | \
+ SXE_GCR_EXT_VT_MODE_64)
+
+#define SXE_PCI_DEVICE_STATUS 0x7A
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define SXE_PCI_LINK_STATUS 0x82
+#define SXE_PCI_DEVICE_CONTROL2 0x98
+#define SXE_PCI_LINK_WIDTH 0x3F0
+#define SXE_PCI_LINK_WIDTH_1 0x10
+#define SXE_PCI_LINK_WIDTH_2 0x20
+#define SXE_PCI_LINK_WIDTH_4 0x40
+#define SXE_PCI_LINK_WIDTH_8 0x80
+#define SXE_PCI_LINK_SPEED 0xF
+#define SXE_PCI_LINK_SPEED_2500 0x1
+#define SXE_PCI_LINK_SPEED_5000 0x2
+#define SXE_PCI_LINK_SPEED_8000 0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def 0x0
+#define SXE_PCIDEVCTRL2_50_100us 0x1
+#define SXE_PCIDEVCTRL2_1_2ms 0x2
+#define SXE_PCIDEVCTRL2_16_32ms 0x5
+#define SXE_PCIDEVCTRL2_65_130ms 0x6
+#define SXE_PCIDEVCTRL2_260_520ms 0x9
+#define SXE_PCIDEVCTRL2_1_2s 0xa
+#define SXE_PCIDEVCTRL2_4_8s 0xd
+#define SXE_PCIDEVCTRL2_17_34s 0xe
+
+
+#define SXE_EICR 0x00800
+#define SXE_EICS 0x00808
+#define SXE_EIMS 0x00880
+#define SXE_EIMC 0x00888
+#define SXE_EIAC 0x00810
+#define SXE_EIAM 0x00890
+#define SXE_EITRSEL 0x00894
+#define SXE_GPIE 0x00898
+#define SXE_IVAR(i) (0x00900 + (i) * 4)
+#define SXE_IVAR_MISC 0x00A00
+#define SXE_EICS_EX(i) (0x00A90 + (i) * 4)
+#define SXE_EIMS_EX(i) (0x00AA0 + (i) * 4)
+#define SXE_EIMC_EX(i) (0x00AB0 + (i) * 4)
+#define SXE_EIAM_EX(i) (0x00AD0 + (i) * 4)
+#define SXE_EITR(i) (((i) <= 23) ? (0x00820 + ((i) * 4)) : \
+ (0x012300 + (((i) - 24) * 4)))
+
+#define SXE_SPP_PROC 0x00AD8
+#define SXE_SPP_STATE 0x00AF4
+
+
+
+#define SXE_EICR_RTX_QUEUE 0x0000FFFF
+#define SXE_EICR_FLOW_NAV 0x00010000
+#define SXE_EICR_MAILBOX 0x00080000
+#define SXE_EICR_LSC 0x00100000
+#define SXE_EICR_LINKSEC 0x00200000
+#define SXE_EICR_ECC 0x10000000
+#define SXE_EICR_HDC 0x20000000
+#define SXE_EICR_TCP_TIMER 0x40000000
+#define SXE_EICR_OTHER 0x80000000
+
+
+#define SXE_EICS_RTX_QUEUE SXE_EICR_RTX_QUEUE
+#define SXE_EICS_FLOW_NAV SXE_EICR_FLOW_NAV
+#define SXE_EICS_MAILBOX SXE_EICR_MAILBOX
+#define SXE_EICS_LSC SXE_EICR_LSC
+#define SXE_EICS_ECC SXE_EICR_ECC
+#define SXE_EICS_HDC SXE_EICR_HDC
+#define SXE_EICS_TCP_TIMER SXE_EICR_TCP_TIMER
+#define SXE_EICS_OTHER SXE_EICR_OTHER
+
+
+#define SXE_EIMS_RTX_QUEUE SXE_EICR_RTX_QUEUE
+#define SXE_EIMS_FLOW_NAV SXE_EICR_FLOW_NAV
+#define SXE_EIMS_MAILBOX SXE_EICR_MAILBOX
+#define SXE_EIMS_LSC SXE_EICR_LSC
+#define SXE_EIMS_ECC SXE_EICR_ECC
+#define SXE_EIMS_HDC SXE_EICR_HDC
+#define SXE_EIMS_TCP_TIMER SXE_EICR_TCP_TIMER
+#define SXE_EIMS_OTHER SXE_EICR_OTHER
+#define SXE_EIMS_ENABLE_MASK (SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \
+ SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER)
+
+#define SXE_EIMC_FLOW_NAV SXE_EICR_FLOW_NAV
+#define SXE_EIMC_LSC SXE_EICR_LSC
+#define SXE_EIMC_HDC SXE_EICR_HDC
+
+
+#define SXE_GPIE_SPP0_EN 0x00000001
+#define SXE_GPIE_SPP1_EN 0x00000002
+#define SXE_GPIE_SPP2_EN 0x00000004
+#define SXE_GPIE_MSIX_MODE 0x00000010
+#define SXE_GPIE_OCD 0x00000020
+#define SXE_GPIE_EIMEN 0x00000040
+#define SXE_GPIE_EIAME 0x40000000
+#define SXE_GPIE_PBA_SUPPORT 0x80000000
+#define SXE_GPIE_VTMODE_MASK 0x0000C000
+#define SXE_GPIE_VTMODE_16 0x00004000
+#define SXE_GPIE_VTMODE_32 0x00008000
+#define SXE_GPIE_VTMODE_64 0x0000C000
+
+
+#define SXE_IVAR_ALLOC_VALID 0x80
+
+
+#define SXE_EITR_CNT_WDIS 0x80000000
+#define SXE_EITR_ITR_MASK 0x00000FF8
+#define SXE_EITR_ITR_SHIFT 2
+#define SXE_EITR_ITR_MAX (SXE_EITR_ITR_MASK >> SXE_EITR_ITR_SHIFT)
+
+
+#define SXE_EICR_GPI_SPP0 0x01000000
+#define SXE_EICR_GPI_SPP1 0x02000000
+#define SXE_EICR_GPI_SPP2 0x04000000
+#define SXE_EIMS_GPI_SPP0 SXE_EICR_GPI_SPP0
+#define SXE_EIMS_GPI_SPP1 SXE_EICR_GPI_SPP1
+#define SXE_EIMS_GPI_SPP2 SXE_EICR_GPI_SPP2
+
+
+#define SXE_SPP_PROC_SPP2_TRIGGER 0x00300000
+#define SXE_SPP_PROC_SPP2_TRIGGER_MASK 0xFFCFFFFF
+#define SXE_SPP_PROC_DELAY_US_MASK 0x0000FFFF
+#define SXE_SPP_PROC_DELAY_US 0x00000007
+
+
+#define SXE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+
+#define SXE_RXCSUM 0x05000
+#define SXE_RFCTL 0x05008
+#define SXE_FCTRL 0x05080
+#define SXE_EXVET 0x05078
+#define SXE_VLNCTRL 0x05088
+#define SXE_MCSTCTRL 0x05090
+#define SXE_ETQF(_i) (0x05128 + ((_i) * 4))
+#define SXE_ETQS(_i) (0x0EC00 + ((_i) * 4))
+#define SXE_SYNQF 0x0EC30
+#define SXE_MTA(_i) (0x05200 + ((_i) * 4))
+#define SXE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define SXE_VFTA(_i) (0x0A000 + ((_i) * 4))
+#define SXE_RAL(_i) (0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i) (0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LOW(_i) (0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HIGH(_i) (0x0A604 + ((_i) * 8))
+#define SXE_PSRTYPE(_i) (0x0EA00 + ((_i) * 4))
+#define SXE_RETA(_i) (0x0EB00 + ((_i) * 4))
+#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))
+#define SXE_RQTC 0x0EC70
+#define SXE_MRQC 0x0EC80
+#define SXE_IEOI 0x0F654
+#define SXE_PL 0x0F658
+#define SXE_LPL 0x0F65C
+
+
+#define SXE_ETQF_CNT 8
+#define SXE_MTA_CNT 128
+#define SXE_UTA_CNT 128
+#define SXE_VFTA_CNT 128
+#define SXE_RAR_CNT 128
+#define SXE_MPSAR_CNT 128
+
+
+#define SXE_EXVET_DEFAULT 0x81000000
+#define SXE_VLNCTRL_DEFAULT 0x8100
+#define SXE_IEOI_DEFAULT 0x060005DC
+#define SXE_PL_DEFAULT 0x3e000016
+#define SXE_LPL_DEFAULT 0x26000000
+
+
+#define SXE_RXCSUM_IPPCSE 0x00001000
+#define SXE_RXCSUM_PCSD 0x00002000
+
+
+#define SXE_RFCTL_LRO_DIS 0x00000020
+#define SXE_RFCTL_NFSW_DIS 0x00000040
+#define SXE_RFCTL_NFSR_DIS 0x00000080
+
+
+#define SXE_FCTRL_SBP 0x00000002
+#define SXE_FCTRL_MPE 0x00000100
+#define SXE_FCTRL_UPE 0x00000200
+#define SXE_FCTRL_BAM 0x00000400
+#define SXE_FCTRL_PMCF 0x00001000
+#define SXE_FCTRL_DPF 0x00002000
+
+
+#define SXE_VLNCTRL_VET 0x0000FFFF
+#define SXE_VLNCTRL_CFI 0x10000000
+#define SXE_VLNCTRL_CFIEN 0x20000000
+#define SXE_VLNCTRL_VFE 0x40000000
+#define SXE_VLNCTRL_VME 0x80000000
+
+#define SXE_EXVET_VET_EXT_SHIFT 16
+#define SXE_EXTENDED_VLAN (1 << 26)
+
+
+#define SXE_MCSTCTRL_MFE 4
+
+#define SXE_ETQF_FILTER_EAPOL 0
+#define SXE_ETQF_FILTER_1588 3
+#define SXE_ETQF_FILTER_FIP 4
+#define SXE_ETQF_FILTER_LLDP 5
+#define SXE_ETQF_FILTER_LACP 6
+#define SXE_ETQF_FILTER_FC 7
+#define SXE_MAX_ETQF_FILTERS 8
+#define SXE_ETQF_1588 0x40000000
+#define SXE_ETQF_FILTER_EN 0x80000000
+#define SXE_ETQF_POOL_ENABLE BIT(26)
+#define SXE_ETQF_POOL_SHIFT 20
+
+
+#define SXE_ETQS_RX_QUEUE 0x007F0000
+#define SXE_ETQS_RX_QUEUE_SHIFT 16
+#define SXE_ETQS_LLI 0x20000000
+#define SXE_ETQS_QUEUE_EN 0x80000000
+
+
+#define SXE_SYN_FILTER_ENABLE 0x00000001
+#define SXE_SYN_FILTER_QUEUE 0x000000FE
+#define SXE_SYN_FILTER_QUEUE_SHIFT 1
+#define SXE_SYN_FILTER_SYNQFP 0x80000000
+
+
+#define SXE_RAH_VIND_MASK 0x003C0000
+#define SXE_RAH_VIND_SHIFT 18
+#define SXE_RAH_AV 0x80000000
+#define SXE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+
+#define SXE_PSRTYPE_TCPHDR 0x00000010
+#define SXE_PSRTYPE_UDPHDR 0x00000020
+#define SXE_PSRTYPE_IPV4HDR 0x00000100
+#define SXE_PSRTYPE_IPV6HDR 0x00000200
+#define SXE_PSRTYPE_L2HDR 0x00001000
+
+
+#define SXE_MRQC_RSSEN 0x00000001
+#define SXE_MRQC_MRQE_MASK 0xF
+#define SXE_MRQC_RT8TCEN 0x00000002
+#define SXE_MRQC_RT4TCEN 0x00000003
+#define SXE_MRQC_RTRSS8TCEN 0x00000004
+#define SXE_MRQC_RTRSS4TCEN 0x00000005
+#define SXE_MRQC_VMDQEN 0x00000008
+#define SXE_MRQC_VMDQRSS32EN 0x0000000A
+#define SXE_MRQC_VMDQRSS64EN 0x0000000B
+#define SXE_MRQC_VMDQRT8TCEN 0x0000000C
+#define SXE_MRQC_VMDQRT4TCEN 0x0000000D
+#define SXE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define SXE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define SXE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define SXE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define SXE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define SXE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define SXE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+
+
+#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_LROCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL 0x02F00
+#define SXE_RXCTRL 0x03000
+#define SXE_LRODBU 0x03028
+#define SXE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+
+#define SXE_DRXCFG (0x03C20)
+
+
+#define SXE_RXDCTL_CNT 128
+
+
+#define SXE_RXDCTL_DEFAULT 0x40210
+
+
+#define SXE_SRRCTL_DROP_EN 0x10000000
+#define SXE_SRRCTL_BSIZEPKT_SHIFT (10)
+#define SXE_SRRCTL_BSIZEHDRSIZE_SHIFT (2)
+#define SXE_SRRCTL_DESCTYPE_DATA_ONEBUF 0x02000000
+#define SXE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+
+#define SXE_RXDCTL_ENABLE 0x02000000
+#define SXE_RXDCTL_SWFLSH 0x04000000
+#define SXE_RXDCTL_VME 0x40000000
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT 8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16
+
+
+#define SXE_LROCTL_LROEN 0x01
+#define SXE_LROCTL_MAXDESC_1 0x00
+#define SXE_LROCTL_MAXDESC_4 0x04
+#define SXE_LROCTL_MAXDESC_8 0x08
+#define SXE_LROCTL_MAXDESC_16 0x0C
+
+
+#define SXE_RDRXCTL_RDMTS_1_2 0x00000000
+#define SXE_RDRXCTL_RDMTS_EN 0x00200000
+#define SXE_RDRXCTL_CRCSTRIP 0x00000002
+#define SXE_RDRXCTL_PSP 0x00000004
+#define SXE_RDRXCTL_MVMEN 0x00000020
+#define SXE_RDRXCTL_DMAIDONE 0x00000008
+#define SXE_RDRXCTL_AGGDIS 0x00010000
+#define SXE_RDRXCTL_LROFRSTSIZE 0x003E0000
+#define SXE_RDRXCTL_LROLLIDIS 0x00800000
+#define SXE_RDRXCTL_LROACKC 0x02000000
+#define SXE_RDRXCTL_FCOE_WRFIX 0x04000000
+#define SXE_RDRXCTL_MBINTEN 0x10000000
+#define SXE_RDRXCTL_MDP_EN 0x20000000
+#define SXE_RDRXCTL_MPBEN 0x00000010
+
+#define SXE_RDRXCTL_MCEN 0x00000040
+
+
+
+#define SXE_RXCTRL_RXEN 0x00000001
+
+
+#define SXE_LRODBU_LROACKDIS 0x00000080
+
+
+#define SXE_DRXCFG_GSP_ZERO 0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_DMATXCTL 0x04A80
+#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))
+#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define SXE_PVFTDWBAL(p) (0x06038 + (0x40 * (p)))
+#define SXE_PVFTDWBAH(p) (0x0603C + (0x40 * (p)))
+#define SXE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4))
+#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))
+#define SXE_MTQC 0x08120
+#define SXE_TXPBFCS 0x0CE00
+#define SXE_DTXCFG 0x0CE08
+#define SXE_DTMPCNT 0x0CE98
+
+
+#define SXE_DMATXCTL_DEFAULT 0x81000000
+
+
+#define SXE_DMATXCTL_TE 0x1
+#define SXE_DMATXCTL_GDV 0x8
+#define SXE_DMATXCTL_VT_SHIFT 16
+#define SXE_DMATXCTL_VT_MASK 0xFFFF0000
+
+
+#define SXE_TXDCTL_HTHRESH_SHIFT 8
+#define SXE_TXDCTL_WTHRESH_SHIFT 16
+#define SXE_TXDCTL_ENABLE 0x02000000
+#define SXE_TXDCTL_SWFLSH 0x04000000
+
+#define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \
+ SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx)
+#define SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, vf_ring_idx) \
+ SXE_PVFTDWBAH((ring_per_pool) * (vf_idx) + vf_ring_idx)
+
+
+#define SXE_MTQC_RT_ENA 0x1
+#define SXE_MTQC_VT_ENA 0x2
+#define SXE_MTQC_64Q_1PB 0x0
+#define SXE_MTQC_32VF 0x8
+#define SXE_MTQC_64VF 0x4
+#define SXE_MTQC_8TC_8TQ 0xC
+#define SXE_MTQC_4TC_4TQ 0x8
+
+
+#define SXE_TFCS_PB0_MASK 0x1
+#define SXE_TFCS_PB1_MASK 0x2
+#define SXE_TFCS_PB2_MASK 0x4
+#define SXE_TFCS_PB3_MASK 0x8
+#define SXE_TFCS_PB4_MASK 0x10
+#define SXE_TFCS_PB5_MASK 0x20
+#define SXE_TFCS_PB6_MASK 0x40
+#define SXE_TFCS_PB7_MASK 0x80
+#define SXE_TFCS_PB_MASK 0xff
+
+
+#define SXE_DTXCFG_DBUTX_START 0x00000001
+#define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG 0x20
+
+
+#define SXE_RTRPCS 0x02430
+#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))
+#define SXE_RTRUP2TC 0x03020
+#define SXE_RTTDCS 0x04900
+#define SXE_RTTDQSEL 0x04904
+#define SXE_RTTDT1C 0x04908
+#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))
+#define SXE_RTTBCNRM 0x04980
+#define SXE_RTTBCNRC 0x04984
+#define SXE_RTTUP2TC 0x0C800
+#define SXE_RTTPCS 0x0CD00
+#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))
+
+
+#define SXE_RTRPCS_RRM 0x00000002
+#define SXE_RTRPCS_RAC 0x00000004
+#define SXE_RTRPCS_ARBDIS 0x00000040
+
+
+#define SXE_RTRPT4C_MCL_SHIFT 12
+#define SXE_RTRPT4C_BWG_SHIFT 9
+#define SXE_RTRPT4C_GSP 0x40000000
+#define SXE_RTRPT4C_LSP 0x80000000
+
+
+#define SXE_RTRUP2TC_UP_SHIFT 3
+#define SXE_RTRUP2TC_UP_MASK 7
+
+
+#define SXE_RTTDCS_ARBDIS 0x00000040
+#define SXE_RTTDCS_TDPAC 0x00000001
+
+#define SXE_RTTDCS_VMPAC 0x00000002
+
+#define SXE_RTTDCS_TDRM 0x00000010
+#define SXE_RTTDCS_ARBDIS 0x00000040 /* duplicate of the definition above (identical value, so legal) */
+#define SXE_RTTDCS_BDPM 0x00400000
+#define SXE_RTTDCS_BPBFSM 0x00800000
+
+#define SXE_RTTDCS_SPEED_CHG 0x80000000
+
+
+#define SXE_RTTDT2C_MCL_SHIFT 12
+#define SXE_RTTDT2C_BWG_SHIFT 9
+#define SXE_RTTDT2C_GSP 0x40000000
+#define SXE_RTTDT2C_LSP 0x80000000
+
+
+#define SXE_RTTBCNRC_RS_ENA 0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT 14
+#define SXE_RTTBCNRC_RF_INT_MASK \
+ (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+
+
+#define SXE_RTTUP2TC_UP_SHIFT 3
+
+
+#define SXE_RTTPCS_TPPAC 0x00000020
+
+#define SXE_RTTPCS_ARBDIS 0x00000040
+#define SXE_RTTPCS_TPRM 0x00000100
+#define SXE_RTTPCS_ARBD_SHIFT 22
+#define SXE_RTTPCS_ARBD_DCB 0x4
+
+
+#define SXE_RTTPT2C_MCL_SHIFT 12
+#define SXE_RTTPT2C_BWG_SHIFT 9
+#define SXE_RTTPT2C_GSP 0x40000000
+#define SXE_RTTPT2C_LSP 0x80000000
+
+
+#define SXE_TPH_CTRL 0x11074
+#define SXE_TPH_TXCTRL(_i) (0x0600C + ((_i) * 0x40))
+#define SXE_TPH_RXCTRL(_i) (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40)))
+
+
+#define SXE_TPH_CTRL_ENABLE 0x00000000
+#define SXE_TPH_CTRL_DISABLE 0x00000001
+#define SXE_TPH_CTRL_MODE_CB1 0x00
+#define SXE_TPH_CTRL_MODE_CB2 0x02
+
+
+#define SXE_TPH_RXCTRL_DESC_TPH_EN BIT(5)
+#define SXE_TPH_RXCTRL_HEAD_TPH_EN BIT(6)
+#define SXE_TPH_RXCTRL_DATA_TPH_EN BIT(7)
+#define SXE_TPH_RXCTRL_DESC_RRO_EN BIT(9)
+#define SXE_TPH_RXCTRL_DATA_WRO_EN BIT(13)
+#define SXE_TPH_RXCTRL_HEAD_WRO_EN BIT(15)
+#define SXE_TPH_RXCTRL_CPUID_SHIFT 24
+
+#define SXE_TPH_TXCTRL_DESC_TPH_EN BIT(5)
+#define SXE_TPH_TXCTRL_DESC_RRO_EN BIT(9)
+#define SXE_TPH_TXCTRL_DESC_WRO_EN BIT(11)
+#define SXE_TPH_TXCTRL_DATA_RRO_EN BIT(13)
+#define SXE_TPH_TXCTRL_CPUID_SHIFT 24
+
+
+#define SXE_SECTXCTRL 0x08800
+#define SXE_SECTXSTAT 0x08804
+#define SXE_SECTXBUFFAF 0x08808
+#define SXE_SECTXMINIFG 0x08810
+#define SXE_SECRXCTRL 0x08D00
+#define SXE_SECRXSTAT 0x08D04
+#define SXE_LSECTXCTRL 0x08A04
+#define SXE_LSECTXSCL 0x08A08
+#define SXE_LSECTXSCH 0x08A0C
+#define SXE_LSECTXSA 0x08A10
+#define SXE_LSECTXPN(_n) (0x08A14 + (4 * (_n)))
+#define SXE_LSECTXKEY(_n, _m) (0x08A1C + ((0x10 * (_n)) + (4 * (_m))))
+#define SXE_LSECRXCTRL 0x08B04
+#define SXE_LSECRXSCL 0x08B08
+#define SXE_LSECRXSCH 0x08B0C
+#define SXE_LSECRXSA(_i) (0x08B10 + (4 * (_i)))
+#define SXE_LSECRXPN(_i) (0x08B18 + (4 * (_i)))
+#define SXE_LSECRXKEY(_n, _m) (0x08B20 + ((0x10 * (_n)) + (4 * (_m))))
+
+
+#define SXE_SECTXCTRL_SECTX_DIS 0x00000001
+#define SXE_SECTXCTRL_TX_DIS 0x00000002
+#define SXE_SECTXCTRL_STORE_FORWARD 0x00000004
+
+
+#define SXE_SECTXSTAT_SECTX_RDY 0x00000001
+#define SXE_SECTXSTAT_SECTX_OFF_DIS 0x00000002
+#define SXE_SECTXSTAT_ECC_TXERR 0x00000004
+
+
+#define SXE_SECRXCTRL_SECRX_DIS 0x00000001
+#define SXE_SECRXCTRL_RX_DIS 0x00000002
+#define SXE_SECRXCTRL_RP 0x00000080
+
+
+#define SXE_SECRXSTAT_SECRX_RDY 0x00000001
+#define SXE_SECRXSTAT_SECRX_OFF_DIS 0x00000002
+#define SXE_SECRXSTAT_ECC_RXERR 0x00000004
+
+#define SXE_SECTX_DCB_ENABLE_MASK 0x00001F00
+
+#define SXE_LSECTXCTRL_EN_MASK 0x00000003
+#define SXE_LSECTXCTRL_EN_SHIFT 0
+#define SXE_LSECTXCTRL_ES 0x00000010
+#define SXE_LSECTXCTRL_AISCI 0x00000020
+#define SXE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define SXE_LSECTXCTRL_PNTHRSH_SHIFT 8
+#define SXE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define SXE_LSECRXCTRL_EN_MASK 0x0000000C
+#define SXE_LSECRXCTRL_EN_SHIFT 2
+#define SXE_LSECRXCTRL_DROP_EN 0x00000010
+#define SXE_LSECRXCTRL_DROP_EN_SHIFT 4
+#define SXE_LSECRXCTRL_PLSH 0x00000040
+#define SXE_LSECRXCTRL_PLSH_SHIFT 6
+#define SXE_LSECRXCTRL_RP 0x00000080
+#define SXE_LSECRXCTRL_RP_SHIFT 7
+#define SXE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+#define SXE_LSECTXSA_AN0_MASK 0x00000003
+#define SXE_LSECTXSA_AN0_SHIFT 0
+#define SXE_LSECTXSA_AN1_MASK 0x0000000C
+#define SXE_LSECTXSA_AN1_SHIFT 2
+#define SXE_LSECTXSA_SELSA 0x00000010
+#define SXE_LSECTXSA_SELSA_SHIFT 4
+#define SXE_LSECTXSA_ACTSA 0x00000020
+
+#define SXE_LSECRXSA_AN_MASK 0x00000003
+#define SXE_LSECRXSA_AN_SHIFT 0
+#define SXE_LSECRXSA_SAV 0x00000004
+#define SXE_LSECRXSA_SAV_SHIFT 2
+#define SXE_LSECRXSA_RETIRED 0x00000010
+#define SXE_LSECRXSA_RETIRED_SHIFT 4
+
+#define SXE_LSECRXSCH_PI_MASK 0xFFFF0000
+#define SXE_LSECRXSCH_PI_SHIFT 16
+
+#define SXE_LSECTXCTRL_DISABLE 0x0
+#define SXE_LSECTXCTRL_AUTH 0x1
+#define SXE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+
+#define SXE_LSECRXCTRL_DISABLE 0x0
+#define SXE_LSECRXCTRL_CHECK 0x1
+#define SXE_LSECRXCTRL_STRICT 0x2
+#define SXE_LSECRXCTRL_DROP 0x3
+#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+
+
+
+#define SXE_IPSTXIDX 0x08900
+#define SXE_IPSTXSALT 0x08904
+#define SXE_IPSTXKEY(_i) (0x08908 + (4 * (_i)))
+#define SXE_IPSRXIDX 0x08E00
+#define SXE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i)))
+#define SXE_IPSRXSPI 0x08E14
+#define SXE_IPSRXIPIDX 0x08E18
+#define SXE_IPSRXKEY(_i) (0x08E1C + (4 * (_i)))
+#define SXE_IPSRXSALT 0x08E2C
+#define SXE_IPSRXMOD 0x08E30
+
+
+
+#define SXE_FNAVCTRL 0x0EE00
+#define SXE_FNAVHKEY 0x0EE68
+#define SXE_FNAVSKEY 0x0EE6C
+#define SXE_FNAVDIP4M 0x0EE3C
+#define SXE_FNAVSIP4M 0x0EE40
+#define SXE_FNAVTCPM 0x0EE44
+#define SXE_FNAVUDPM 0x0EE48
+#define SXE_FNAVIP6M 0x0EE74
+#define SXE_FNAVM 0x0EE70
+
+#define SXE_FNAVFREE 0x0EE38
+#define SXE_FNAVLEN 0x0EE4C
+#define SXE_FNAVUSTAT 0x0EE50
+#define SXE_FNAVFSTAT 0x0EE54
+#define SXE_FNAVMATCH 0x0EE58
+#define SXE_FNAVMISS 0x0EE5C
+
+#define SXE_FNAVSIPv6(_i) (0x0EE0C + ((_i) * 4))
+#define SXE_FNAVIPSA 0x0EE18
+#define SXE_FNAVIPDA 0x0EE1C
+#define SXE_FNAVPORT 0x0EE20
+#define SXE_FNAVVLAN 0x0EE24
+#define SXE_FNAVHASH 0x0EE28
+#define SXE_FNAVCMD 0x0EE2C
+
+
+#define SXE_FNAVCTRL_FLEX_SHIFT 16
+#define SXE_FNAVCTRL_MAX_LENGTH_SHIFT 24
+#define SXE_FNAVCTRL_FULL_THRESH_SHIFT 28
+#define SXE_FNAVCTRL_DROP_Q_SHIFT 8
+#define SXE_FNAVCTRL_PBALLOC_64K 0x00000001
+#define SXE_FNAVCTRL_PBALLOC_128K 0x00000002
+#define SXE_FNAVCTRL_PBALLOC_256K 0x00000003
+#define SXE_FNAVCTRL_INIT_DONE 0x00000008
+#define SXE_FNAVCTRL_SPECIFIC_MATCH 0x00000010
+#define SXE_FNAVCTRL_REPORT_STATUS 0x00000020
+#define SXE_FNAVCTRL_REPORT_STATUS_ALWAYS 0x00000080
+
+#define SXE_FNAVCTRL_FLEX_MASK (0x1F << SXE_FNAVCTRL_FLEX_SHIFT)
+
+#define SXE_FNAVTCPM_DPORTM_SHIFT 16
+
+#define SXE_FNAVM_VLANID 0x00000001
+#define SXE_FNAVM_VLANP 0x00000002
+#define SXE_FNAVM_POOL 0x00000004
+#define SXE_FNAVM_L4P 0x00000008
+#define SXE_FNAVM_FLEX 0x00000010
+#define SXE_FNAVM_DIPv6 0x00000020
+
+#define SXE_FNAVPORT_DESTINATION_SHIFT 16
+#define SXE_FNAVVLAN_FLEX_SHIFT 16
+#define SXE_FNAVHASH_SIG_SW_INDEX_SHIFT 16
+
+#define SXE_FNAVCMD_CMD_MASK 0x00000003
+#define SXE_FNAVCMD_CMD_ADD_FLOW 0x00000001
+#define SXE_FNAVCMD_CMD_REMOVE_FLOW 0x00000002
+#define SXE_FNAVCMD_CMD_QUERY_REM_FILT 0x00000003
+#define SXE_FNAVCMD_FILTER_VALID 0x00000004
+#define SXE_FNAVCMD_FILTER_UPDATE 0x00000008
+#define SXE_FNAVCMD_IPv6DMATCH 0x00000010
+#define SXE_FNAVCMD_L4TYPE_UDP 0x00000020
+#define SXE_FNAVCMD_L4TYPE_TCP 0x00000040
+#define SXE_FNAVCMD_L4TYPE_SCTP 0x00000060
+#define SXE_FNAVCMD_IPV6 0x00000080
+#define SXE_FNAVCMD_CLEARHT 0x00000100
+#define SXE_FNAVCMD_DROP 0x00000200
+#define SXE_FNAVCMD_INT 0x00000400
+#define SXE_FNAVCMD_LAST 0x00000800
+#define SXE_FNAVCMD_COLLISION 0x00001000
+#define SXE_FNAVCMD_QUEUE_EN 0x00008000
+#define SXE_FNAVCMD_FLOW_TYPE_SHIFT 5
+#define SXE_FNAVCMD_RX_QUEUE_SHIFT 16
+#define SXE_FNAVCMD_RX_TUNNEL_FILTER_SHIFT 23
+#define SXE_FNAVCMD_VT_POOL_SHIFT 24
+#define SXE_FNAVCMD_CMD_POLL 10
+#define SXE_FNAVCMD_TUNNEL_FILTER 0x00800000
+
+
+#define SXE_LXOFFRXCNT 0x041A8
+#define SXE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4))
+
+#define SXE_EPC_GPRC 0x050E0
+#define SXE_RXDGPC 0x02F50
+#define SXE_RXDGBCL 0x02F54
+#define SXE_RXDGBCH 0x02F58
+#define SXE_RXDDGPC 0x02F5C
+#define SXE_RXDDGBCL 0x02F60
+#define SXE_RXDDGBCH 0x02F64
+#define SXE_RXLPBKGPC 0x02F68
+#define SXE_RXLPBKGBCL 0x02F6C
+#define SXE_RXLPBKGBCH 0x02F70
+#define SXE_RXDLPBKGPC 0x02F74
+#define SXE_RXDLPBKGBCL 0x02F78
+#define SXE_RXDLPBKGBCH 0x02F7C
+
+#define SXE_RXTPCIN 0x02F88
+#define SXE_RXTPCOUT 0x02F8C
+#define SXE_RXPRDDC 0x02F9C
+
+#define SXE_TXDGPC 0x087A0
+#define SXE_TXDGBCL 0x087A4
+#define SXE_TXDGBCH 0x087A8
+#define SXE_TXSWERR 0x087B0
+#define SXE_TXSWITCH 0x087B4
+#define SXE_TXREPEAT 0x087B8
+#define SXE_TXDESCERR 0x087BC
+#define SXE_MNGPRC 0x040B4
+#define SXE_MNGPDC 0x040B8
+#define SXE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define SXE_TQSM(_i) (0x08600 + ((_i) * 4))
+#define SXE_QPRC(_i) (0x01030 + ((_i) * 0x40))
+#define SXE_QBRC_L(_i) (0x01034 + ((_i) * 0x40))
+#define SXE_QBRC_H(_i) (0x01038 + ((_i) * 0x40))
+
+
+#define SXE_QPRDC(_i) (0x01430 + ((_i) * 0x40))
+#define SXE_QPTC(_i) (0x08680 + ((_i) * 0x4))
+#define SXE_QBTC_L(_i) (0x08700 + ((_i) * 0x8))
+#define SXE_QBTC_H(_i) (0x08704 + ((_i) * 0x8))
+#define SXE_SSVPC 0x08780
+#define SXE_MNGPTC 0x0CF90
+#define SXE_MPC(_i) (0x03FA0 + ((_i) * 4))
+
+#define SXE_DBUDRTCICNT(_i) (0x03C6C + ((_i) * 4))
+#define SXE_DBUDRTCOCNT(_i) (0x03C8C + ((_i) * 4))
+#define SXE_DBUDRBDPCNT(_i) (0x03D20 + ((_i) * 4))
+#define SXE_DBUDREECNT(_i) (0x03D40 + ((_i) * 4))
+#define SXE_DBUDROFPCNT(_i) (0x03D60 + ((_i) * 4))
+#define SXE_DBUDTTCICNT(_i) (0x0CE54 + ((_i) * 4))
+#define SXE_DBUDTTCOCNT(_i) (0x0CE74 + ((_i) * 4))
+
+
+
+#define SXE_WUC 0x05800
+#define SXE_WUFC 0x05808
+#define SXE_WUS 0x05810
+#define SXE_IP6AT(_i) (0x05880 + ((_i) * 4))
+
+
+#define SXE_IP6AT_CNT 4
+
+
+#define SXE_WUC_PME_EN 0x00000002
+#define SXE_WUC_PME_STATUS 0x00000004
+#define SXE_WUC_WKEN 0x00000010
+#define SXE_WUC_APME 0x00000020
+
+
+#define SXE_WUFC_LNKC 0x00000001
+#define SXE_WUFC_MAG 0x00000002
+#define SXE_WUFC_EX 0x00000004
+#define SXE_WUFC_MC 0x00000008
+#define SXE_WUFC_BC 0x00000010
+#define SXE_WUFC_ARP 0x00000020
+#define SXE_WUFC_IPV4 0x00000040
+#define SXE_WUFC_IPV6 0x00000080
+#define SXE_WUFC_MNG 0x00000100
+
+
+
+
+#define SXE_TSCTRL 0x14800
+#define SXE_TSES 0x14804
+#define SXE_TSYNCTXCTL 0x14810
+#define SXE_TSYNCRXCTL 0x14820
+#define SXE_RXSTMPL 0x14824
+#define SXE_RXSTMPH 0x14828
+#define SXE_SYSTIML 0x14840
+#define SXE_SYSTIMM 0x14844
+#define SXE_SYSTIMH 0x14848
+#define SXE_TIMADJL 0x14850
+#define SXE_TIMADJH 0x14854
+#define SXE_TIMINC 0x14860
+
+
+#define SXE_TSYNCTXCTL_TXTT 0x0001
+#define SXE_TSYNCTXCTL_TEN 0x0010
+
+
+#define SXE_TSYNCRXCTL_RXTT 0x0001
+#define SXE_TSYNCRXCTL_REN 0x0010
+
+
+#define SXE_TSCTRL_TSSEL 0x00001
+#define SXE_TSCTRL_TSEN 0x00002
+#define SXE_TSCTRL_VER_2 0x00010
+#define SXE_TSCTRL_ONESTEP 0x00100
+#define SXE_TSCTRL_CSEN 0x01000
+#define SXE_TSCTRL_PTYP_ALL 0x00C00
+#define SXE_TSCTRL_L4_UNICAST 0x08000
+
+
+#define SXE_TSES_TXES 0x00200
+#define SXE_TSES_RXES 0x00800
+#define SXE_TSES_TXES_V1_SYNC 0x00000
+#define SXE_TSES_TXES_V1_DELAY_REQ 0x00100
+#define SXE_TSES_TXES_V1_ALL 0x00200
+#define SXE_TSES_RXES_V1_SYNC 0x00000
+#define SXE_TSES_RXES_V1_DELAY_REQ 0x00400
+#define SXE_TSES_RXES_V1_ALL 0x00800
+#define SXE_TSES_TXES_V2_ALL 0x00200
+#define SXE_TSES_RXES_V2_ALL 0x00800
+
+#define SXE_IV_SNS 0
+#define SXE_IV_NS 8
+#define SXE_INCPD 0
+#define SXE_BASE_INCVAL 8
+
+
+#define SXE_VT_CTL 0x051B0
+#define SXE_PFMAILBOX(_i) (0x04B00 + (4 * (_i)))
+
+#define SXE_PFMBICR(_i) (0x00710 + (4 * (_i)))
+#define SXE_VFLRE(i) ((((i) & 1)) ? 0x001C0 : 0x00600) /* parenthesize macro arg (CERT PRE01-C) */
+#define SXE_VFLREC(i) (0x00700 + ((i) * 4)) /* parenthesize macro arg (CERT PRE01-C) */
+#define SXE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define SXE_QDE (0x02F04)
+#define SXE_SPOOF(_i) (0x08200 + (_i) * 4)
+#define SXE_PFDTXGSWC 0x08220
+#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4))
+#define SXE_VMOLR(_i) (0x0F000 + ((_i) * 4))
+#define SXE_VLVF(_i) (0x0F100 + ((_i) * 4))
+#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4))
+#define SXE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define SXE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define SXE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+
+
+#define SXE_VMOLR_CNT 64
+#define SXE_VLVF_CNT 64
+#define SXE_VLVFB_CNT 128
+#define SXE_MRCTL_CNT 4
+#define SXE_VMRVLAN_CNT 8
+#define SXE_VMRVM_CNT 8
+#define SXE_SPOOF_CNT 8
+#define SXE_VMVIR_CNT 64
+#define SXE_VFRE_CNT 2
+
+
+#define SXE_VMVIR_VLANA_MASK 0xC0000000
+#define SXE_VMVIR_VLAN_VID_MASK 0x00000FFF
+#define SXE_VMVIR_VLAN_UP_MASK 0x0000E000
+
+
+#define SXE_MRCTL_VPME 0x01
+
+#define SXE_MRCTL_UPME 0x02
+
+#define SXE_MRCTL_DPME 0x04
+
+#define SXE_MRCTL_VLME 0x08
+
+
+#define SXE_VT_CTL_DIS_DEFPL 0x20000000
+#define SXE_VT_CTL_REPLEN 0x40000000
+#define SXE_VT_CTL_VT_ENABLE 0x00000001
+#define SXE_VT_CTL_POOL_SHIFT 7
+#define SXE_VT_CTL_POOL_MASK (0x3F << SXE_VT_CTL_POOL_SHIFT)
+
+
+#define SXE_PFMAILBOX_STS 0x00000001
+#define SXE_PFMAILBOX_ACK 0x00000002
+#define SXE_PFMAILBOX_VFU 0x00000004
+#define SXE_PFMAILBOX_PFU 0x00000008
+#define SXE_PFMAILBOX_RVFU 0x00000010
+
+
+#define SXE_PFMBICR_VFREQ 0x00000001
+#define SXE_PFMBICR_VFACK 0x00010000
+#define SXE_PFMBICR_VFREQ_MASK 0x0000FFFF
+#define SXE_PFMBICR_VFACK_MASK 0xFFFF0000
+
+
+#define SXE_QDE_ENABLE (0x00000001)
+#define SXE_QDE_HIDE_VLAN (0x00000002)
+#define SXE_QDE_IDX_MASK (0x00007F00)
+#define SXE_QDE_IDX_SHIFT (8)
+#define SXE_QDE_WRITE (0x00010000)
+
+
+
+#define SXE_SPOOF_VLAN_SHIFT (8)
+
+
+#define SXE_PFDTXGSWC_VT_LBEN 0x1
+
+
+#define SXE_VMVIR_VLANA_DEFAULT 0x40000000
+#define SXE_VMVIR_VLANA_NEVER 0x80000000
+
+
+#define SXE_VMOLR_UPE 0x00400000
+#define SXE_VMOLR_VPE 0x00800000
+#define SXE_VMOLR_AUPE 0x01000000
+#define SXE_VMOLR_ROMPE 0x02000000
+#define SXE_VMOLR_ROPE 0x04000000
+#define SXE_VMOLR_BAM 0x08000000
+#define SXE_VMOLR_MPE 0x10000000
+
+
+#define SXE_VLVF_VIEN 0x80000000
+#define SXE_VLVF_ENTRIES 64
+#define SXE_VLVF_VLANID_MASK 0x00000FFF
+
+
+#define SXE_HDC_HOST_BASE 0x16000
+#define SXE_HDC_SW_LK (SXE_HDC_HOST_BASE + 0x00)
+#define SXE_HDC_PF_LK (SXE_HDC_HOST_BASE + 0x04)
+#define SXE_HDC_SW_OV (SXE_HDC_HOST_BASE + 0x08)
+#define SXE_HDC_FW_OV (SXE_HDC_HOST_BASE + 0x0C)
+#define SXE_HDC_PACKET_HEAD0 (SXE_HDC_HOST_BASE + 0x10)
+
+#define SXE_HDC_PACKET_DATA0 (SXE_HDC_HOST_BASE + 0x20)
+
+
+#define SXE_HDC_MSI_STATUS_REG 0x17000
+#define SXE_FW_STATUS_REG 0x17004
+#define SXE_DRV_STATUS_REG 0x17008
+#define SXE_FW_HDC_STATE_REG 0x1700C
+#define SXE_R0_MAC_ADDR_RAL 0x17010
+#define SXE_R0_MAC_ADDR_RAH 0x17014
+#define SXE_CRC_STRIP_REG 0x17018
+
+
+#define SXE_HDC_SW_LK_BIT 0x0001
+#define SXE_HDC_PF_LK_BIT 0x0003
+#define SXE_HDC_SW_OV_BIT 0x0001
+#define SXE_HDC_FW_OV_BIT 0x0001
+#define SXE_HDC_RELEASE_SW_LK 0x0000
+
+#define SXE_HDC_LEN_TO_REG(n) ((n) - 1) /* parenthesize macro arg (CERT PRE01-C) */
+#define SXE_HDC_LEN_FROM_REG(n) ((n) + 1) /* parenthesize macro arg (CERT PRE01-C) */
+
+
+#define SXE_RX_PKT_BUF_SIZE_SHIFT 10
+#define SXE_TX_PKT_BUF_SIZE_SHIFT 10
+
+#define SXE_RXIDX_TBL_SHIFT 1
+#define SXE_RXTXIDX_IPS_EN 0x00000001
+#define SXE_RXTXIDX_IDX_SHIFT 3
+#define SXE_RXTXIDX_READ 0x40000000
+#define SXE_RXTXIDX_WRITE 0x80000000
+
+
+#define SXE_KEEP_CRC_EN 0x00000001
+
+
+#define SXE_VMD_CTL 0x0581C
+
+
+#define SXE_VMD_CTL_POOL_EN 0x00000001
+#define SXE_VMD_CTL_POOL_FILTER 0x00000002
+
+
+#define SXE_FLCTRL 0x14300
+#define SXE_PFCTOP 0x14304
+#define SXE_FCTTV0 0x14310
+#define SXE_FCTTV(_i) (SXE_FCTTV0 + ((_i) * 4))
+#define SXE_FCRTV 0x14320
+#define SXE_TFCS 0x14324
+
+
+#define SXE_FCTRL_TFCE_MASK 0x0018
+#define SXE_FCTRL_TFCE_LFC_EN 0x0008
+#define SXE_FCTRL_TFCE_PFC_EN 0x0010
+#define SXE_FCTRL_TFCE_DPF_EN 0x0020
+#define SXE_FCTRL_RFCE_MASK 0x0300
+#define SXE_FCTRL_RFCE_LFC_EN 0x0100
+#define SXE_FCTRL_RFCE_PFC_EN 0x0200
+
+#define SXE_FCTRL_TFCE_FCEN_MASK 0x00FF0000
+#define SXE_FCTRL_TFCE_XONE_MASK 0xFF000000
+
+
+#define SXE_PFCTOP_FCT 0x8808
+#define SXE_PFCTOP_FCOP_MASK 0xFFFF0000
+#define SXE_PFCTOP_FCOP_PFC 0x01010000
+#define SXE_PFCTOP_FCOP_LFC 0x00010000
+
+
+#define SXE_COMCTRL 0x14400
+#define SXE_PCCTRL 0x14404
+#define SXE_LPBKCTRL 0x1440C
+#define SXE_MAXFS 0x14410
+#define SXE_SACONH 0x14420
+#define SXE_SACONL 0x14424
+#define SXE_VLANCTRL 0x14430
+#define SXE_VLANID 0x14434
+#define SXE_LINKS 0x14454
+#define SXE_FPGA_SDS_STS 0x14704
+#define SXE_MSCA 0x14500
+#define SXE_MSCD 0x14504
+
+#define SXE_HLREG0 0x04240
+#define SXE_MFLCN 0x04294
+#define SXE_MACC 0x04330
+
+#define SXE_PCS1GLSTA 0x0420C
+#define SXE_MFLCN 0x04294 /* duplicate of the definition above (identical value, so legal) */
+#define SXE_PCS1GANA 0x04850
+#define SXE_PCS1GANLP 0x04854
+
+
+#define SXE_LPBKCTRL_EN 0x00000001
+
+
+#define SXE_MAC_ADDR_SACONH_SHIFT 32
+#define SXE_MAC_ADDR_SACONL_MASK 0xFFFFFFFF
+
+
+#define SXE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define SXE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define SXE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define SXE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define SXE_PCS1GANA_SYM_PAUSE 0x100
+#define SXE_PCS1GANA_ASM_PAUSE 0x80
+
+
+#define SXE_LKSTS_PCS_LKSTS_UP 0x00000001
+#define SXE_LINK_UP_TIME 90
+#define SXE_AUTO_NEG_TIME 45
+
+
+#define SXE_MSCA_NP_ADDR_MASK 0x0000FFFF
+#define SXE_MSCA_NP_ADDR_SHIFT 0
+#define SXE_MSCA_DEV_TYPE_MASK 0x001F0000
+#define SXE_MSCA_DEV_TYPE_SHIFT 16
+#define SXE_MSCA_PHY_ADDR_MASK 0x03E00000
+#define SXE_MSCA_PHY_ADDR_SHIFT 21
+#define SXE_MSCA_OP_CODE_MASK 0x0C000000
+#define SXE_MSCA_OP_CODE_SHIFT 26
+#define SXE_MSCA_ADDR_CYCLE 0x00000000
+#define SXE_MSCA_WRITE 0x04000000
+#define SXE_MSCA_READ 0x0C000000
+#define SXE_MSCA_READ_AUTOINC 0x08000000
+#define SXE_MSCA_ST_CODE_MASK 0x30000000
+#define SXE_MSCA_ST_CODE_SHIFT 28
+#define SXE_MSCA_NEW_PROTOCOL 0x00000000
+#define SXE_MSCA_OLD_PROTOCOL 0x10000000
+#define SXE_MSCA_BYPASSRA_C45 0x40000000
+#define SXE_MSCA_MDI_CMD_ON_PROG 0x80000000
+
+
+#define MDIO_MSCD_RDATA_LEN 16
+#define MDIO_MSCD_RDATA_SHIFT 16
+
+
+#define SXE_CRCERRS 0x14A04
+#define SXE_ERRBC 0x14A10
+#define SXE_RLEC 0x14A14
+#define SXE_PRC64 0x14A18
+#define SXE_PRC127 0x14A1C
+#define SXE_PRC255 0x14A20
+#define SXE_PRC511 0x14A24
+#define SXE_PRC1023 0x14A28
+#define SXE_PRC1522 0x14A2C
+#define SXE_BPRC 0x14A30
+#define SXE_MPRC 0x14A34
+#define SXE_GPRC 0x14A38
+#define SXE_GORCL 0x14A3C
+#define SXE_GORCH 0x14A40
+#define SXE_RUC 0x14A44
+#define SXE_RFC 0x14A48
+#define SXE_ROC 0x14A4C
+#define SXE_RJC 0x14A50
+#define SXE_TORL 0x14A54
+#define SXE_TORH 0x14A58
+#define SXE_TPR 0x14A5C
+#define SXE_PRCPF(_i) (0x14A60 + ((_i) * 4))
+#define SXE_GPTC 0x14B00
+#define SXE_GOTCL 0x14B04
+#define SXE_GOTCH 0x14B08
+#define SXE_TPT 0x14B0C
+#define SXE_PTC64 0x14B10
+#define SXE_PTC127 0x14B14
+#define SXE_PTC255 0x14B18
+#define SXE_PTC511 0x14B1C
+#define SXE_PTC1023 0x14B20
+#define SXE_PTC1522 0x14B24
+#define SXE_MPTC 0x14B28
+#define SXE_BPTC 0x14B2C
+#define SXE_PFCT(_i) (0x14B30 + ((_i) * 4))
+
+#define SXE_MACCFG 0x0CE04
+#define SXE_MACCFG_PAD_EN 0x00000001
+
+
+#define SXE_COMCTRL_TXEN 0x0001
+#define SXE_COMCTRL_RXEN 0x0002
+#define SXE_COMCTRL_EDSEL 0x0004
+#define SXE_COMCTRL_SPEED_1G 0x0200
+#define SXE_COMCTRL_SPEED_10G 0x0300
+
+
+#define SXE_PCCTRL_TXCE 0x0001
+#define SXE_PCCTRL_RXCE 0x0002
+#define SXE_PCCTRL_PEN 0x0100
+#define SXE_PCCTRL_PCSC_ALL 0x30000
+
+
+#define SXE_MAXFS_TFSEL 0x0001
+#define SXE_MAXFS_RFSEL 0x0002
+#define SXE_MAXFS_MFS_MASK 0xFFFF0000
+#define SXE_MAXFS_MFS 0x40000000
+#define SXE_MAXFS_MFS_SHIFT 16
+
+
+#define SXE_LINKS_UP 0x00000001
+
+#define SXE_10G_LINKS_DOWN 0x00000006
+
+
+#define SXE_LINK_SPEED_UNKNOWN 0
+#define SXE_LINK_SPEED_10_FULL 0x0002
+#define SXE_LINK_SPEED_100_FULL 0x0008
+#define SXE_LINK_SPEED_1GB_FULL 0x0020
+#define SXE_LINK_SPEED_10GB_FULL 0x0080
+
+
+#define SXE_HLREG0_TXCRCEN 0x00000001
+#define SXE_HLREG0_RXCRCSTRP 0x00000002
+#define SXE_HLREG0_JUMBOEN 0x00000004
+#define SXE_HLREG0_TXPADEN 0x00000400
+#define SXE_HLREG0_TXPAUSEEN 0x00001000
+#define SXE_HLREG0_RXPAUSEEN 0x00004000
+#define SXE_HLREG0_LPBK 0x00008000
+#define SXE_HLREG0_MDCSPD 0x00010000
+#define SXE_HLREG0_CONTMDC 0x00020000
+#define SXE_HLREG0_CTRLFLTR 0x00040000
+#define SXE_HLREG0_PREPEND 0x00F00000
+#define SXE_HLREG0_PRIPAUSEEN 0x01000000
+#define SXE_HLREG0_RXPAUSERECDA 0x06000000
+#define SXE_HLREG0_RXLNGTHERREN 0x08000000
+#define SXE_HLREG0_RXPADSTRIPEN 0x10000000
+
+#define SXE_MFLCN_PMCF 0x00000001
+#define SXE_MFLCN_DPF 0x00000002
+#define SXE_MFLCN_RPFCE 0x00000004
+#define SXE_MFLCN_RFCE 0x00000008
+#define SXE_MFLCN_RPFCE_MASK 0x00000FF4
+#define SXE_MFLCN_RPFCE_SHIFT 4
+
+#define SXE_MACC_FLU 0x00000001
+#define SXE_MACC_FSV_10G 0x00030000
+#define SXE_MACC_FS 0x00040000
+
+#define SXE_DEFAULT_FCPAUSE 0xFFFF
+
+
+#define SXE_SAQF(_i) (0x0E000 + ((_i) * 4))
+#define SXE_DAQF(_i) (0x0E200 + ((_i) * 4))
+#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4))
+#define SXE_FTQF(_i) (0x0E600 + ((_i) * 4))
+#define SXE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4))
+
+#define SXE_MAX_FTQF_FILTERS 128
+#define SXE_FTQF_PROTOCOL_MASK 0x00000003
+#define SXE_FTQF_PROTOCOL_TCP 0x00000000
+#define SXE_FTQF_PROTOCOL_UDP 0x00000001
+#define SXE_FTQF_PROTOCOL_SCTP 2
+#define SXE_FTQF_PRIORITY_MASK 0x00000007
+#define SXE_FTQF_PRIORITY_SHIFT 2
+#define SXE_FTQF_POOL_MASK 0x0000003F
+#define SXE_FTQF_POOL_SHIFT 8
+#define SXE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define SXE_FTQF_5TUPLE_MASK_SHIFT 25
+#define SXE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define SXE_FTQF_DEST_ADDR_MASK 0x1D
+#define SXE_FTQF_SOURCE_PORT_MASK 0x1B
+#define SXE_FTQF_DEST_PORT_MASK 0x17
+#define SXE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define SXE_FTQF_POOL_MASK_EN 0x40000000
+#define SXE_FTQF_QUEUE_ENABLE 0x80000000
+
+#define SXE_SDPQF_DSTPORT 0xFFFF0000
+#define SXE_SDPQF_DSTPORT_SHIFT 16
+#define SXE_SDPQF_SRCPORT 0x0000FFFF
+
+#define SXE_L34T_IMIR_SIZE_BP 0x00001000
+#define SXE_L34T_IMIR_RESERVE 0x00080000
+#define SXE_L34T_IMIR_LLI 0x00100000
+#define SXE_L34T_IMIR_QUEUE 0x0FE00000
+#define SXE_L34T_IMIR_QUEUE_SHIFT 21
+
+#define SXE_VMTXSW(_i) (0x05180 + ((_i) * 4))
+#define SXE_VMTXSW_REGISTER_COUNT 2
+
+#define SXE_TXSTMP_SEL 0x14510
+#define SXE_TXSTMP_VAL 0x1451c
+
+#define SXE_TXTS_MAGIC0 0x005a005900580057ULL /* 64-bit Tx timestamp pattern; ULL makes the width and unsignedness explicit on every ABI */
+#define SXE_TXTS_MAGIC1 0x005e005d005c005bULL
+
+#endif
new file mode 100644
@@ -0,0 +1,796 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_TYPE_H__
+#define __SXE_TYPE_H__
+
+#define SXE_TXD_CMD_EOP 0x01000000
+#define SXE_TXD_CMD_RS 0x08000000
+#define SXE_TXD_STAT_DD 0x00000001
+
+#define SXE_TXD_CMD (SXE_TXD_CMD_EOP | SXE_TXD_CMD_RS)
+
+
+typedef union sxe_adv_tx_desc {
+	struct {
+		U64 buffer_addr; /* DMA address of the packet buffer */
+		U32 cmd_type_len; /* command bits (SXE_TXD_CMD_*) plus descriptor type/length fields */
+		U32 olinfo_status; /* offload info on submit; hardware writes status here */
+	} read; /* layout as written by software before transmit */
+	struct {
+		U64 rsvd;
+		U32 nxtseq_seed;
+		U32 status; /* SXE_TXD_STAT_DD set by hardware when the descriptor is done */
+	} wb; /* write-back layout after hardware completion */
+} sxe_adv_tx_desc_u;
+
+typedef union sxe_adv_rx_desc {
+	struct {
+		U64 pkt_addr; /* DMA address for packet data */
+		U64 hdr_addr; /* DMA address for split header data */
+	} read; /* layout as posted by software */
+	struct {
+		struct {
+			union {
+				U32 data;
+				struct {
+					U16 pkt_info; /* packet type / RSS type bits */
+					U16 hdr_info; /* header length / split info */
+				} hs_rss;
+			} lo_dword;
+			union {
+				U32 rss; /* RSS hash value */
+				struct {
+					U16 ip_id; /* IP identification field */
+					U16 csum; /* packet checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			U32 status_error; /* SXE_RXD_STAT_* status plus error bits */
+			U16 length; /* received packet length */
+			U16 vlan; /* stripped VLAN tag, when VLAN strip is enabled */
+		} upper;
+	} wb; /* write-back layout filled in by hardware */
+} sxe_adv_rx_desc_u;
+
+#define SXE_RXD_STAT_DD 0x01
+#define SXE_RXD_STAT_EOP 0x02
+
+
+#define PCI_VENDOR_ID_STARS 0x1FF2
+#define SXE_DEV_ID_FPGA 0x1160
+
+
+#define SXE_CTRL 0x00000
+#define SXE_STATUS 0x00008
+#define SXE_CTRL_EXT 0x00018
+#define SXE_ESDP 0x00020
+#define SXE_EODSDP 0x00028
+
+#define SXE_I2CCTL_8259X 0x00028
+#define SXE_I2CCTL_X540 SXE_I2CCTL_8259X
+#define SXE_I2CCTL_X550 0x15F5C
+#define SXE_I2CCTL_X550EM_x SXE_I2CCTL_X550
+#define SXE_I2CCTL_X550EM_a SXE_I2CCTL_X550
+#define SXE_I2CCTL(_hw) SXE_BY_MAC((_hw), I2CCTL)
+
+#define SXE_LEDCTL 0x00200
+#define SXE_FRTIMER 0x00048
+#define SXE_TCPTIMER 0x0004C
+#define SXE_CORESPARE 0x00600
+#define SXE_EXVET 0x05078
+
+
+#define SXE_EICR 0x00800
+#define SXE_EICS 0x00808
+#define SXE_EIMS 0x00880
+#define SXE_EIMC 0x00888
+#define SXE_EIAC 0x00810
+#define SXE_EIAM 0x00890
+#define SXE_EICR_EX(_i) (0x00A80 + (_i) * 4)
+#define SXE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define SXE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define SXE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define SXE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+
+
+#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define SXE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define SXE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define SXE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define SXE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define SXE_RSCDBU 0x03028
+#define SXE_RDDCC 0x02F20
+#define SXE_RXMEMWRAP 0x03190
+#define SXE_STARCTRL 0x03024
+
+#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : (0x0D014 + (((_i) - 64) * 0x40)))
+
+#define SXE_DCA_RXCTRL(_i) (((_i) < 64) ? \
+ (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40)))
+#define SXE_RDRXCTL 0x02F00
+#define SXE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+#define SXE_DRXCFG 0x03C20
+#define SXE_RXCTRL 0x03000
+#define SXE_DROPEN 0x03D04
+#define SXE_RXPBSIZE_SHIFT 10
+#define SXE_DRXCFG_GSP_ZERO 0x00000002
+#define SXE_DRXCFG_DBURX_START 0x00000001
+
+
+#define SXE_RXCSUM 0x05000
+#define SXE_RFCTL 0x05008
+#define SXE_DRECCCTL 0x02F08
+#define SXE_DRECCCTL_DISABLE 0
+
+
+#define SXE_MTA(_i) (0x05200 + ((_i) * 4))
+#define SXE_RAL(_i) (0x0A200 + ((_i) * 8))
+#define SXE_RAH(_i) (0x0A204 + ((_i) * 8))
+#define SXE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define SXE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+
+
+#define SXE_PSRTYPE(_i) (0x0EA00 + ((_i) * 4))
+
+
+#define SXE_VFTA(_i) (0x0A000 + ((_i) * 4))
+
+
+#define SXE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define SXE_FCTRL 0x05080
+#define SXE_VLNCTRL 0x05088
+#define SXE_MCSTCTRL 0x05090
+#define SXE_MRQC 0x0EC80
+#define SXE_SAQF(_i) (0x0E000 + ((_i) * 4))
+#define SXE_DAQF(_i) (0x0E200 + ((_i) * 4))
+#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4))
+#define SXE_FTQF(_i) (0x0E600 + ((_i) * 4))
+#define SXE_ETQF(_i) (0x05128 + ((_i) * 4))
+#define SXE_ETQS(_i) (0x0EC00 + ((_i) * 4))
+#define SXE_SYNQF 0x0EC30
+#define SXE_RQTC 0x0EC70
+#define SXE_MTQC 0x08120
+#define SXE_VLVF(_i) (0x0F100 + ((_i) * 4))
+#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4))
+#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4))
+#define SXE_PFFLPL 0x050B0
+#define SXE_PFFLPH 0x050B4
+#define SXE_VT_CTL 0x051B0
+#define SXE_PFMAILBOX(_i) (0x04B00 + (4 * (_i)))
+#define SXE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define SXE_PFMBICR(_i) (0x00710 + (4 * (_i)))
+#define SXE_PFMBIMR(_i) (0x00720 + (4 * (_i)))
+#define SXE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define SXE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define SXE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define SXE_QDE 0x2F04
+#define SXE_VMTXSW(_i) (0x05180 + ((_i) * 4))
+#define SXE_VMOLR(_i) (0x0F000 + ((_i) * 4))
+#define SXE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define SXE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define SXE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define SXE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define SXE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4))
+#define SXE_WQBR_TX(_i) (0x8130 + ((_i) * 4))
+#define SXE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4))
+#define SXE_RXFECCERR0 0x051B8
+#define SXE_LLITHRESH 0x0EC90
+#define SXE_IMIR(_i) (0x05A80 + ((_i) * 4))
+#define SXE_IMIREXT(_i) (0x05AA0 + ((_i) * 4))
+#define SXE_IMIRVP 0x0EC60
+#define SXE_VMD_CTL 0x0581C
+#define SXE_RETA(_i) (0x0EB00 + ((_i) * 4))
+#define SXE_ERETA(_i) (0x0EE80 + ((_i) * 4))
+#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4))
+
+
+#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40))
+#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define SXE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define SXE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define SXE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define SXE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define SXE_DTXCTL 0x07E00
+
+#define SXE_DMATXCTL 0x04A80
+#define SXE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4))
+#define SXE_PFDTXGSWC 0x08220
+#define SXE_DTXMXSZRQ 0x08100
+#define SXE_DTXTCPFLGL 0x04A88
+#define SXE_DTXTCPFLGH 0x04A8C
+#define SXE_LBDRPEN 0x0CA00
+#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4))
+
+#define SXE_DMATXCTL_TE 0x1
+#define SXE_DMATXCTL_NS 0x2
+#define SXE_DMATXCTL_GDV 0x8
+#define SXE_DMATXCTL_MDP_EN 0x20
+#define SXE_DMATXCTL_MBINTEN 0x40
+#define SXE_DMATXCTL_VT_SHIFT 16
+
+#define SXE_PFDTXGSWC_VT_LBEN 0x1
+
+
+#define SXE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define SXE_TIPG 0x0CB00
+#define SXE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4))
+#define SXE_DTXCFG 0x0CE08
+#define SXE_MNGTXMAP 0x0CD10
+#define SXE_TIPG_FIBER_DEFAULT 3
+#define SXE_TXPBSIZE_SHIFT 10
+#define SXE_DTXCFG_DBUTX_START 0x00000001
+
+
+#define SXE_RTRPCS 0x02430
+#define SXE_RTTDCS 0x04900
+#define SXE_RTTDCS_ARBDIS 0x00000040
+#define SXE_RTTPCS 0x0CD00
+#define SXE_RTRUP2TC 0x03020
+#define SXE_RTTUP2TC 0x0C800
+#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4))
+#define SXE_TXLLQ(_i) (0x082E0 + ((_i) * 4))
+#define SXE_RTRPT4S(_i) (0x02160 + ((_i) * 4))
+#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4))
+#define SXE_RTTDT2S(_i) (0x04930 + ((_i) * 4))
+#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4))
+#define SXE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4))
+#define SXE_RTTDQSEL 0x04904
+#define SXE_RTTDT1C 0x04908
+#define SXE_RTTDT1S 0x0490C
+
+
+#define SXE_RTTQCNCR 0x08B00
+#define SXE_RTTQCNTG 0x04A90
+#define SXE_RTTBCNRD 0x0498C
+#define SXE_RTTQCNRR 0x0498C
+#define SXE_RTTDTECC 0x04990
+#define SXE_RTTDTECC_NO_BCN 0x00000100
+#define SXE_RTTBCNRC 0x04984
+#define SXE_RTTBCNRC_RS_ENA 0x80000000
+#define SXE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define SXE_RTTBCNRC_RF_INT_SHIFT 14
+#define SXE_RTTBCNRC_RF_INT_MASK (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT)
+#define SXE_RTTBCNRM 0x04980
+#define SXE_RTTQCNRM 0x04980
+
+
+#define SXE_MACCFG 0x0CE04
+
+
+#define SXE_GCR_EXT 0x11050
+#define SXE_GSCL_5_82599 0x11030
+#define SXE_GSCL_6_82599 0x11034
+#define SXE_GSCL_7_82599 0x11038
+#define SXE_GSCL_8_82599 0x1103C
+#define SXE_PHYADR_82599 0x11040
+#define SXE_PHYDAT_82599 0x11044
+#define SXE_PHYCTL_82599 0x11048
+#define SXE_PBACLR_82599 0x11068
+
+#define SXE_CIAA_8259X 0x11088
+
+
+#define SXE_CIAD_8259X 0x1108C
+
+
+#define SXE_PICAUSE 0x110B0
+#define SXE_PIENA 0x110B8
+#define SXE_CDQ_MBR_82599 0x110B4
+#define SXE_PCIESPARE 0x110BC
+#define SXE_MISC_REG_82599 0x110F0
+#define SXE_ECC_CTRL_0_82599 0x11100
+#define SXE_ECC_CTRL_1_82599 0x11104
+#define SXE_ECC_STATUS_82599 0x110E0
+#define SXE_BAR_CTRL_82599 0x110F4
+
+
+#define SXE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define SXE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define SXE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define SXE_GCR_CAP_VER2 0x00040000
+
+#define SXE_GCR_EXT_MSIX_EN 0x80000000
+#define SXE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define SXE_GCR_EXT_VT_MODE_16 0x00000001
+#define SXE_GCR_EXT_VT_MODE_32 0x00000002
+#define SXE_GCR_EXT_VT_MODE_64 0x00000003
+#define SXE_GCR_EXT_SRIOV (SXE_GCR_EXT_MSIX_EN | \
+ SXE_GCR_EXT_VT_MODE_64)
+
+
+#define SXE_PCS1GCFIG 0x04200
+#define SXE_PCS1GLCTL 0x04208
+#define SXE_PCS1GLSTA 0x0420C
+#define SXE_PCS1GDBG0 0x04210
+#define SXE_PCS1GDBG1 0x04214
+#define SXE_PCS1GANA 0x04218
+#define SXE_PCS1GANLP 0x0421C
+#define SXE_PCS1GANNP 0x04220
+#define SXE_PCS1GANLPNP 0x04224
+#define SXE_HLREG0 0x04240
+#define SXE_HLREG1 0x04244
+#define SXE_PAP 0x04248
+#define SXE_MACA 0x0424C
+#define SXE_APAE 0x04250
+#define SXE_ARD 0x04254
+#define SXE_AIS 0x04258
+#define SXE_MSCA 0x0425C
+#define SXE_MSRWD 0x04260
+#define SXE_MLADD 0x04264
+#define SXE_MHADD 0x04268
+#define SXE_MAXFRS 0x04268
+#define SXE_TREG 0x0426C
+#define SXE_PCSS1 0x04288
+#define SXE_PCSS2 0x0428C
+#define SXE_XPCSS 0x04290
+#define SXE_MFLCN 0x04294
+#define SXE_SERDESC 0x04298
+#define SXE_MAC_SGMII_BUSY 0x04298
+#define SXE_MACS 0x0429C
+#define SXE_AUTOC 0x042A0
+#define SXE_LINKS 0x042A4
+#define SXE_LINKS2 0x04324
+#define SXE_AUTOC2 0x042A8
+#define SXE_AUTOC3 0x042AC
+#define SXE_ANLP1 0x042B0
+#define SXE_ANLP2 0x042B4
+#define SXE_MACC 0x04330
+#define SXE_ATLASCTL 0x04800
+#define SXE_MMNGC 0x042D0
+#define SXE_ANLPNP1 0x042D4
+#define SXE_ANLPNP2 0x042D8
+#define SXE_KRPCSFC 0x042E0
+#define SXE_KRPCSS 0x042E4
+#define SXE_FECS1 0x042E8
+#define SXE_FECS2 0x042EC
+#define SXE_SMADARCTL 0x14F10
+#define SXE_MPVC 0x04318
+#define SXE_SGMIIC 0x04314
+
+
+#define SXE_COMCTRL 0x14400
+#define SXE_PCCTRL 0x14404
+#define SXE_LPBKCTRL 0x1440C
+#define SXE_MAXFS 0x14410
+#define SXE_SACONH 0x14420
+#define SXE_VLANCTRL 0x14430
+#define SXE_VLANID 0x14434
+#define SXE_VLANCTRL 0x14430 /* NOTE(review): duplicate of SXE_VLANCTRL two lines above; remove one copy */
+#define SXE_FPAG_SDS_CON 0x14700 /* NOTE(review): "FPAG" looks like a typo for "FPGA" -- the bit defines below use SXE_FPGA_SDS_CON_*; confirm against users before renaming */
+
+
+#define SXE_COMCTRL_TXEN 0x0001
+#define SXE_COMCTRL_RXEN 0x0002
+#define SXE_COMCTRL_EDSEL 0x0004
+#define SXE_COMCTRL_SPEED_1G 0x0200
+#define SXE_COMCTRL_SPEED_10G 0x0300
+
+
+#define SXE_PCCTRL_TXCE 0x0001
+#define SXE_PCCTRL_RXCE 0x0002
+#define SXE_PCCTRL_PEN 0x0100
+#define SXE_PCCTRL_PCSC_ALL 0x30000
+
+
+#define SXE_MAXFS_TFSEL 0x0001
+#define SXE_MAXFS_RFSEL 0x0002
+#define SXE_MAXFS_MFS_MASK 0xFFFF0000
+#define SXE_MAXFS_MFS 0x40000000
+#define SXE_MAXFS_MFS_SHIFT 16
+
+
+#define SXE_FPGA_SDS_CON_FULL_DUPLEX_MODE 0x00200000
+#define SXE_FPGA_SDS_CON_ANRESTART 0x00008000
+#define SXE_FPGA_SDS_CON_AN_ENABLE 0x00001000
+
+
+#define SXE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define SXE_RSCDBU_RSCACKDIS 0x00000080
+
+
+#define SXE_RDRXCTL_RDMTS_1_2 0x00000000
+#define SXE_RDRXCTL_CRCSTRIP 0x00000002
+#define SXE_RDRXCTL_PSP 0x00000004
+#define SXE_RDRXCTL_MVMEN 0x00000020
+#define SXE_RDRXCTL_DMAIDONE 0x00000008
+#define SXE_RDRXCTL_AGGDIS 0x00010000
+#define SXE_RDRXCTL_RSCFRSTSIZE 0x003E0000
+#define SXE_RDRXCTL_RSCLLIDIS 0x00800000
+#define SXE_RDRXCTL_RSCACKC 0x02000000
+#define SXE_RDRXCTL_FCOE_WRFIX 0x04000000
+#define SXE_RDRXCTL_MBINTEN 0x10000000
+#define SXE_RDRXCTL_MDP_EN 0x20000000
+
+
+#define SXE_CTRL_GIO_DIS 0x00000004
+#define SXE_CTRL_LNK_RST 0x00000008
+#define SXE_CTRL_RST 0x04000000
+#define SXE_CTRL_RST_MASK (SXE_CTRL_LNK_RST | SXE_CTRL_RST)
+
+
+#define SXE_MHADD_MFS_MASK 0xFFFF0000
+#define SXE_MHADD_MFS_SHIFT 16
+
+
+#define SXE_CTRL_EXT_PFRSTD 0x00004000
+#define SXE_CTRL_EXT_NS_DIS 0x00010000
+#define SXE_CTRL_EXT_RO_DIS 0x00020000
+#define SXE_CTRL_EXT_DRV_LOAD 0x10000000
+
+
+#define SXE_TXPBSIZE_20KB 0x00005000
+#define SXE_TXPBSIZE_40KB 0x0000A000
+#define SXE_RXPBSIZE_48KB 0x0000C000
+#define SXE_RXPBSIZE_64KB 0x00010000
+#define SXE_RXPBSIZE_80KB 0x00014000
+#define SXE_RXPBSIZE_128KB 0x00020000
+#define SXE_RXPBSIZE_MAX 0x00080000
+#define SXE_TXPBSIZE_MAX 0x00028000
+
+#define SXE_TXPKT_SIZE_MAX 0xA
+#define SXE_MAX_PB 8
+
+
+#define SXE_HLREG0_TXCRCEN 0x00000001
+#define SXE_HLREG0_RXCRCSTRP 0x00000002
+#define SXE_HLREG0_JUMBOEN 0x00000004
+#define SXE_HLREG0_TXPADEN 0x00000400
+#define SXE_HLREG0_TXPAUSEEN 0x00001000
+#define SXE_HLREG0_RXPAUSEEN 0x00004000
+#define SXE_HLREG0_LPBK 0x00008000
+#define SXE_HLREG0_MDCSPD 0x00010000
+#define SXE_HLREG0_CONTMDC 0x00020000
+#define SXE_HLREG0_CTRLFLTR 0x00040000
+#define SXE_HLREG0_PREPEND 0x00F00000
+#define SXE_HLREG0_PRIPAUSEEN 0x01000000
+#define SXE_HLREG0_RXPAUSERECDA 0x06000000
+#define SXE_HLREG0_RXLNGTHERREN 0x08000000
+#define SXE_HLREG0_RXPADSTRIPEN 0x10000000
+
+
+#define SXE_VMOLR_UPE 0x00400000
+#define SXE_VMOLR_VPE 0x00800000
+#define SXE_VMOLR_AUPE 0x01000000
+#define SXE_VMOLR_ROMPE 0x02000000
+#define SXE_VMOLR_ROPE 0x04000000
+#define SXE_VMOLR_BAM 0x08000000
+#define SXE_VMOLR_MPE 0x10000000
+
+
+#define SXE_RXCSUM_IPPCSE 0x00001000
+#define SXE_RXCSUM_PCSD 0x00002000
+
+
+#define SXE_VMD_CTL_VMDQ_EN 0x00000001
+#define SXE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+
+#define SXE_MACCFG_PAD_EN 0x00000001
+
+
+#define SXE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+
+#define SXE_STATUS_LAN_ID 0x0000000C
+#define SXE_STATUS_LAN_ID_SHIFT 2
+#define SXE_STATUS_GIO 0x00080000
+
+
+#define SXE_LINKS_KX_AN_COMP 0x80000000
+#define SXE_LINKS_UP 0x40000000
+#define SXE_LINKS_SPEED 0x20000000
+#define SXE_LINKS_MODE 0x18000000
+#define SXE_LINKS_RX_MODE 0x06000000
+#define SXE_LINKS_TX_MODE 0x01800000
+#define SXE_LINKS_XGXS_EN 0x00400000
+#define SXE_LINKS_SGMII_EN 0x02000000
+#define SXE_LINKS_PCS_1G_EN 0x00200000
+#define SXE_LINKS_1G_AN_EN 0x00100000
+#define SXE_LINKS_KX_AN_IDLE 0x00080000
+#define SXE_LINKS_1G_SYNC 0x00040000
+#define SXE_LINKS_10G_ALIGN 0x00020000
+#define SXE_LINKS_10G_LANE_SYNC 0x00017000
+#define SXE_LINKS_TL_FAULT 0x00001000
+#define SXE_LINKS_SIGNAL 0x00000F00
+
+
+#define SXE_PCI_DEVICE_STATUS 0x7A
+#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define SXE_PCI_LINK_STATUS 0x82
+#define SXE_PCI_DEVICE_CONTROL2 0x98
+#define SXE_PCI_LINK_WIDTH 0x3F0
+#define SXE_PCI_LINK_WIDTH_1 0x10
+#define SXE_PCI_LINK_WIDTH_2 0x20
+#define SXE_PCI_LINK_WIDTH_4 0x40
+#define SXE_PCI_LINK_WIDTH_8 0x80
+#define SXE_PCI_LINK_SPEED 0xF
+#define SXE_PCI_LINK_SPEED_2500 0x1
+#define SXE_PCI_LINK_SPEED_5000 0x2
+#define SXE_PCI_LINK_SPEED_8000 0x3
+#define SXE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define SXE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+#define SXE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define SXE_PCIDEVCTRL2_16_32ms_def 0x0
+#define SXE_PCIDEVCTRL2_50_100us 0x1
+#define SXE_PCIDEVCTRL2_1_2ms 0x2
+#define SXE_PCIDEVCTRL2_16_32ms 0x5
+#define SXE_PCIDEVCTRL2_65_130ms 0x6
+#define SXE_PCIDEVCTRL2_260_520ms 0x9
+#define SXE_PCIDEVCTRL2_1_2s 0xa
+#define SXE_PCIDEVCTRL2_4_8s 0xd
+#define SXE_PCIDEVCTRL2_17_34s 0xe
+
+
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+
+#define SXE_RAH_VIND_MASK 0x003C0000
+#define SXE_RAH_VIND_SHIFT 18
+#define SXE_RAH_AV 0x80000000
+#define SXE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+
+#define SXE_RFCTL_ISCSI_DIS 0x00000001
+#define SXE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define SXE_RFCTL_ISCSI_DWC_SHIFT 1
+#define SXE_RFCTL_RSC_DIS 0x00000020
+#define SXE_RFCTL_NFSW_DIS 0x00000040
+#define SXE_RFCTL_NFSR_DIS 0x00000080
+#define SXE_RFCTL_NFS_VER_MASK 0x00000300
+#define SXE_RFCTL_NFS_VER_SHIFT 8
+#define SXE_RFCTL_NFS_VER_2 0
+#define SXE_RFCTL_NFS_VER_3 1
+#define SXE_RFCTL_NFS_VER_4 2
+#define SXE_RFCTL_IPV6_DIS 0x00000400
+#define SXE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define SXE_RFCTL_IPFRSP_DIS 0x00004000
+#define SXE_RFCTL_IPV6_EX_DIS 0x00010000
+#define SXE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+
+#define SXE_TXDCTL_ENABLE 0x02000000
+#define SXE_TXDCTL_SWFLSH 0x04000000
+#define SXE_TXDCTL_WTHRESH_SHIFT 16
+
+
+#define SXE_RXCTRL_RXEN 0x00000001
+#define SXE_RXCTRL_DMBYPS 0x00000002
+#define SXE_RXDCTL_ENABLE 0x02000000
+#define SXE_RXDCTL_SWFLSH 0x04000000
+
+
+#define SXE_RXDCTL_DESC_FIFO_AFUL_TH_MASK 0x0000001F
+#define SXE_RXDCTL_AFUL_CFG_ERR 0x00000020
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_MASK 0x00001F00
+#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT 8
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_MASK 0x001F0000
+#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16
+
+
+#define SXE_PCI_MASTER_DISABLE_TIMEOUT 800 /* NOTE(review): byte-identical duplicate of the definition earlier in this file; drop one copy */
+
+
+#define SXE_FCTRL_SBP 0x00000002
+#define SXE_FCTRL_MPE 0x00000100
+#define SXE_FCTRL_UPE 0x00000200
+#define SXE_FCTRL_BAM 0x00000400
+#define SXE_FCTRL_PMCF 0x00001000
+#define SXE_FCTRL_DPF 0x00002000
+
+
+#define SXE_QDE_ENABLE 0x00000001
+#define SXE_QDE_HIDE_VLAN 0x00000002
+#define SXE_QDE_IDX_MASK 0x00007F00
+#define SXE_QDE_IDX_SHIFT 8
+#define SXE_QDE_WRITE 0x00010000
+
+#define SXE_TXD_POPTS_IXSM 0x01
+#define SXE_TXD_POPTS_TXSM 0x02
+#define SXE_TXD_CMD_EOP 0x01000000
+#define SXE_TXD_CMD_IFCS 0x02000000
+#define SXE_TXD_CMD_IC 0x04000000
+#define SXE_TXD_CMD_RS 0x08000000
+#define SXE_TXD_CMD_DEXT 0x20000000
+#define SXE_TXD_CMD_VLE 0x40000000
+#define SXE_TXD_STAT_DD 0x00000001
+
+
+#define SXE_SRRCTL_BSIZEPKT_SHIFT 10
+#define SXE_SRRCTL_RDMTS_SHIFT 22
+#define SXE_SRRCTL_RDMTS_MASK 0x01C00000
+#define SXE_SRRCTL_DROP_EN 0x10000000
+#define SXE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define SXE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define SXE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define SXE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define SXE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define SXE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define SXE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+#define SXE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define SXE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define SXE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define SXE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define SXE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define SXE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define SXE_RXDADV_RSCCNT_MASK 0x001E0000
+#define SXE_RXDADV_RSCCNT_SHIFT 17
+#define SXE_RXDADV_HDRBUFLEN_SHIFT 5
+#define SXE_RXDADV_SPLITHEADER_EN 0x00001000
+#define SXE_RXDADV_SPH 0x8000
+
+
+#define SXE_ADVTXD_DTYP_DATA 0x00300000
+#define SXE_ADVTXD_DCMD_IFCS SXE_TXD_CMD_IFCS
+#define SXE_ADVTXD_DCMD_DEXT SXE_TXD_CMD_DEXT
+#define SXE_ADVTXD_PAYLEN_SHIFT 14
+
+
+#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+
+
+#define SXE_ERR_EEPROM -1
+#define SXE_ERR_EEPROM_CHECKSUM -2
+#define SXE_ERR_PHY -3
+#define SXE_ERR_CONFIG -4
+#define SXE_ERR_PARAM -5
+#define SXE_ERR_MAC_TYPE -6
+#define SXE_ERR_UNKNOWN_PHY -7
+#define SXE_ERR_LINK_SETUP -8
+#define SXE_ERR_ADAPTER_STOPPED -9
+#define SXE_ERR_INVALID_MAC_ADDR -10
+#define SXE_ERR_DEVICE_NOT_SUPPORTED -11
+#define SXE_ERR_MASTER_REQUESTS_PENDING -12
+#define SXE_ERR_INVALID_LINK_SETTINGS -13
+#define SXE_ERR_AUTONEG_NOT_COMPLETE -14
+#define SXE_ERR_RESET_FAILED -15
+#define SXE_ERR_SWFW_SYNC -16
+#define SXE_ERR_PHY_ADDR_INVALID -17
+#define SXE_ERR_I2C -18
+#define SXE_ERR_SFP_NOT_SUPPORTED -19
+#define SXE_ERR_SFP_NOT_PRESENT -20
+#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define SXE_ERR_NO_SAN_ADDR_PTR -22
+#define SXE_ERR_FDIR_REINIT_FAILED -23
+#define SXE_ERR_EEPROM_VERSION -24
+#define SXE_ERR_NO_SPACE -25
+#define SXE_ERR_OVERTEMP -26
+#define SXE_ERR_FC_NOT_NEGOTIATED -27
+#define SXE_ERR_FC_NOT_SUPPORTED -28
+#define SXE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define SXE_ERR_PBA_SECTION -31
+#define SXE_ERR_INVALID_ARGUMENT -32
+#define SXE_ERR_HOST_INTERFACE_COMMAND -33
+#define SXE_ERR_FDIR_CMD_INCOMPLETE -38
+#define SXE_ERR_FW_RESP_INVALID -39
+#define SXE_ERR_TOKEN_RETRY -40
+#define SXE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define SXE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+#define SXE_FUSES0_300MHZ BIT(5)
+#define SXE_FUSES0_REV_MASK (3u << 6)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define SXE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
+#define SXE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define SXE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define SXE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define SXE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define SXE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
+#define SXE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define SXE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define SXE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define SXE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
+#define SXE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define SXE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SGMII_EN BIT(25)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN37_EN BIT(26)
+#define SXE_KRM_PMD_FLX_MASK_ST20_AN_EN BIT(27)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_100M BIT(28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28)
+#define SXE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART BIT(31)
+
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define SXE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
+
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define SXE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define SXE_KRM_LINK_S1_MAC_AN_COMPLETE BIT(28)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define SXE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define SXE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define SXE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
+
+#define SXE_KRM_AN_CNTL_8_LINEAR BIT(0)
+#define SXE_KRM_AN_CNTL_8_LIMITING BIT(1)
+
+#define SXE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE BIT(10)
+#define SXE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE BIT(11)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
+#define SXE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
+
+#define SXE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define SXE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define SXE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
+
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define SXE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
+
+#define SXE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
+
+#define SXE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define SXE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define SXE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define SXE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
+
+#define SXE_SB_IOSF_INDIRECT_CTRL 0x00011144
+#define SXE_SB_IOSF_INDIRECT_DATA 0x00011148
+
+#define SXE_SB_IOSF_CTRL_ADDR_SHIFT 0
+#define SXE_SB_IOSF_CTRL_ADDR_MASK 0xFF
+#define SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
+#define SXE_SB_IOSF_CTRL_RESP_STAT_MASK \
+ (0x3 << SXE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
+#define SXE_SB_IOSF_CTRL_CMPL_ERR_MASK \
+ (0xFF << SXE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
+#define SXE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
+#define SXE_SB_IOSF_CTRL_BUSY_SHIFT 31
+#define SXE_SB_IOSF_CTRL_BUSY BIT(SXE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define SXE_SB_IOSF_TARGET_KR_PHY 0
+
+#define SXE_NW_MNG_IF_SEL 0x00011178
+#define SXE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10M BIT(17)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_100M BIT(18)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20)
+#define SXE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21)
+#define SXE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25)
+#define SXE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << SXE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+
+#endif
+
new file mode 100644
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_VER_H__
+#define __SXE_VER_H__
+
+#define SXE_VERSION "0.0.0.0"
+#define SXE_COMMIT_ID "51935d6"
+#define SXE_BRANCH "feature/sagitta-1.3.0-P3-dpdk_patch_rwy"
+#define SXE_BUILD_TIME "2024-09-05 21:49:55" /* NOTE(review): hard-coded timestamp defeats reproducible builds; consider generating at build time or dropping */
+
+
+#define SXE_DRV_NAME "sxe"
+#define SXEVF_DRV_NAME "sxevf"
+#define SXE_DRV_LICENSE "GPL v2" /* NOTE(review): conflicts with this file's SPDX BSD-3-Clause tag -- confirm the intended license string */
+#define SXE_DRV_AUTHOR "sxe"
+#define SXEVF_DRV_AUTHOR "sxevf"
+#define SXE_DRV_DESCRIPTION "sxe driver"
+#define SXEVF_DRV_DESCRIPTION "sxevf driver"
+
+
+/* Firmware identity strings reported by the driver. */
+#define SXE_FW_NAME "soc"
+#define SXE_FW_ARCH "arm32"
+
+#ifndef PS3_CFG_RELEASE
+#define PS3_SXE_FW_BUILD_MODE "debug"
+#else
+#define PS3_SXE_FW_BUILD_MODE "release"
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C), 2020, Wuxi Stars Micro System Technologies Co., Ltd.
+
+cflags += ['-DSXE_DPDK'] # base build flag for the DPDK flavor of the shared sxe code
+cflags += ['-DSXE_HOST_DRIVER']
+cflags += ['-DSXE_DPDK_L4_FEATURES'] # master switch gating the optional feature blocks below
+cflags += ['-DSXE_DPDK_SRIOV']
+cflags += ['-DSXE_DPDK_FILTER_CTRL']
+cflags += ['-DSXE_DPDK_MACSEC']
+cflags += ['-DSXE_DPDK_TM']
+cflags += ['-DSXE_DPDK_SIMD']
+
+# NOTE(review): dead commented-out build lines below -- either wire up the base
+# subdir objects or delete these two lines before merging.
+#subdir('base')
+#objs = [base_objs]
+
+deps += ['hash'] # rte_hash is used by the filter code
+sources = files(
+        'pf/sxe_main.c',
+        'pf/sxe_filter.c',
+        'pf/sxe_flow_ctrl.c',
+        'pf/sxe_irq.c',
+        'pf/sxe_ethdev.c',
+        'pf/sxe_offload.c',
+        'pf/sxe_queue.c',
+        'pf/sxe_rx.c',
+        'pf/sxe_tx.c',
+        'pf/sxe_stats.c',
+        'pf/sxe_pmd_hdc.c',
+        'pf/sxe_phy.c',
+        'pf/sxe_ptp.c',
+        'pf/sxe_vf.c',
+        'pf/sxe_dcb.c',
+        'pf/sxe_filter_ctrl.c',
+        'pf/sxe_fnav.c',
+        'pf/sxe_tm.c',
+        'pf/sxe_macsec.c',
+        'vf/sxevf_main.c',
+        'vf/sxevf_filter.c',
+        'vf/sxevf_irq.c',
+        'vf/sxevf_msg.c',
+        'vf/sxevf_ethdev.c',
+        'vf/sxevf_stats.c',
+        'vf/sxevf_rx.c',
+        'vf/sxevf_tx.c',
+        'vf/sxevf_queue.c',
+        'vf/sxevf_offload.c',
+        'base/sxe_queue_common.c',
+        'base/sxe_rx_common.c',
+        'base/sxe_tx_common.c',
+        'base/sxe_offload_common.c',
+        'base/sxe_common.c',
+        'base/sxe_hw.c',
+        'base/sxevf_hw.c',
+)
+
+testpmd_sources = files('sxe_testpmd.c') # driver-specific testpmd commands
+
+# Vectorized Rx/Tx paths: SSE on x86, NEON on arm; other arches use scalar code.
+if arch_subdir == 'x86'
+    sources += files('pf/sxe_vec_sse.c')
+elif arch_subdir == 'arm'
+    sources += files('pf/sxe_vec_neon.c')
+endif
+
+includes += include_directories('base')
+includes += include_directories('pf')
+includes += include_directories('vf')
+includes += include_directories('include/sxe/')
+includes += include_directories('include/')
+
+
new file mode 100644
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+/* Public PMD-specific API of the sxe driver (loopback, TC bandwidth,
+ * MACsec control). All calls take an ethdev port id and return 0 on
+ * success or a negative errno-style value on failure.
+ */
+#ifndef __PMD_SXE_H__
+#define __PMD_SXE_H__
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+
+/* Enable (on=1) or disable (on=0) TX loopback on the given port. */
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on);
+
+/* Set per-TC TX bandwidth weights; bw_weight has tc_num entries that
+ * must sum to 100.
+ * NOTE(review): 'port' is u8 here while every other API in this header
+ * uses u16 - ports >= 256 cannot be addressed; confirm whether this
+ * should be widened.
+ */
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+			u8 tc_num, u8 *bw_weight);
+
+/* Enable MACsec offload; 'en' selects encryption, 'rp_en' replay protect. */
+s32 rte_pmd_sxe_macsec_enable(u16 port, u8 en, u8 rp_en);
+
+s32 rte_pmd_sxe_macsec_disable(u16 port);
+
+/* Configure the TX secure channel with the 6-byte MAC address 'mac'. */
+s32 rte_pmd_sxe_macsec_txsc_configure(u16 port, u8 *mac);
+
+/* Configure the RX secure channel; 'pi' is the port identifier. */
+s32 rte_pmd_sxe_macsec_rxsc_configure(u16 port, u8 *mac, u16 pi);
+
+/* Configure a TX secure association: index, association number,
+ * initial packet number and 16-byte key.
+ */
+s32 rte_pmd_sxe_macsec_txsa_configure(u16 port, u8 sa_idx, u8 an,
+					u32 pn, u8 *keys);
+
+/* Configure an RX secure association (same parameters as the TX variant). */
+s32 rte_pmd_sxe_macsec_rxsa_configure(u16 port, u8 sa_idx, u8 an,
+					u32 pn, u8 *keys);
+
+#endif
+
+
new file mode 100644
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#ifndef __SXE_H__
+#define __SXE_H__
+
+#include <rte_pci.h>
+#include <rte_time.h>
+
+#include "sxe_types.h"
+#include "sxe_filter.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+#include "sxe_macsec.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include "sxe_filter_ctrl.h"
+#include "sxe_fnav.h"
+#endif
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+struct sxe_hw;
+struct sxe_vlan_context;
+
+#define SXE_LPBK_DISABLED 0x0
+#define SXE_LPBK_ENABLED 0x1
+
+/* PCI identity of the supported ASIC. */
+#define PCI_VENDOR_ID_STARS 0x1FF2
+#define SXE_DEV_ID_ASIC 0x10a1
+
+/* printf helpers for logging a 6-byte MAC address. */
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+		 ((u8 *)(x))[2], ((u8 *)(x))[3], \
+		 ((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) \
+	do { \
+	} while (0)
+#endif
+
+/* NOTE(review): "#if 1" is a hard-wired build switch; prefer a named
+ * config macro (or remove the conditional) so the intent is visible.
+ */
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_sxe_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_sxe_prefetch(p) do {} while (0)
+#endif
+
+/* IEEE 1588 (PTP) state: timecounters for system/RX/TX timestamps plus
+ * the last latched TX hardware timestamp (seconds / nanoseconds).
+ */
+struct sxe_ptp_context {
+	struct rte_timecounter      systime_tc;
+	struct rte_timecounter      rx_tstamp_tc;
+	struct rte_timecounter      tx_tstamp_tc;
+	u32 tx_hwtstamp_sec;
+	u32 tx_hwtstamp_nsec;
+};
+
+/* Per-port private data (dev->data->dev_private): hardware handle plus
+ * one context struct per driver feature. Optional features are compiled
+ * in under the SXE_DPDK_L4_FEATURES sub-switches.
+ */
+struct sxe_adapter {
+	struct sxe_hw hw;
+
+	struct sxe_irq_context irq_ctxt;
+
+	struct sxe_vlan_context vlan_ctxt;
+	struct sxe_mac_filter_context mac_filter_ctxt;
+#ifdef RTE_ADAPTER_HAVE_FNAV_CONF
+	struct rte_eth_fdir_conf fnav_conf;
+#endif
+	struct sxe_ptp_context ptp_ctxt;
+	struct sxe_phy_context phy_ctxt;
+	struct sxe_virtual_context vt_ctxt;
+
+	struct sxe_stats_info stats_info;
+	struct sxe_dcb_context dcb_ctxt;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	struct sxe_macsec_context macsec_ctxt;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	struct sxe_tm_context tm_ctxt;
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context filter_ctxt;
+
+	struct sxe_fnav_context fnav_ctxt;
+#endif
+
+	/* true when the bulk-alloc RX path may be used for this port. */
+	bool rx_batch_alloc_allowed;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	bool rx_vec_allowed;
+#endif
+	/* PCI address string, e.g. "0000:3b:00.0". */
+	s8 name[PCI_PRI_STR_SIZE+1];
+
+	u32 mtu;
+
+	/* Set once the RSS redirection table was changed by the user. */
+	bool rss_reta_updated;
+
+	/* Guards the background link-status polling thread.
+	 * NOTE(review): rte_atomic32_t is deprecated in recent DPDK;
+	 * consider C11 atomics when the minimum DPDK version allows.
+	 */
+	rte_atomic32_t link_thread_running;
+	pthread_t link_thread_tid;
+	bool is_stopped;
+};
+
+s32 sxe_hw_reset(struct sxe_hw *hw);
+
+void sxe_hw_start(struct sxe_hw *hw);
+
+bool is_sxe_supported(struct rte_eth_dev *dev);
+
+#endif
new file mode 100644
@@ -0,0 +1,967 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_phy.h"
+#include "sxe_errno.h"
+#include "sxe_offload.h"
+#include "sxe_ethdev.h"
+#include "sxe_compat_version.h"
+#include "rte_pmd_sxe.h"
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+
+#define DCB_CREDIT_QUANTUM 64
+#define MAX_CREDIT_REFILL 511
+#define MAX_CREDIT 4095
+
+/* Fill the adapter's CEE DCB configuration with defaults: 8 TCs, one
+ * bandwidth group per TC with ~100/8 percent each (the "+ (i & 1)" term
+ * makes the eight shares sum to exactly 100), all user priorities mapped
+ * to TC0, PFC disabled, VMDq active.
+ */
+void sxe_dcb_init(struct rte_eth_dev *dev)
+{
+	u8 i;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *cfg = &adapter->dcb_ctxt.config;
+	struct sxe_tc_config *tc;
+	u8 dcb_max_tc = SXE_DCB_MAX_TRAFFIC_CLASS;
+
+	memset(cfg, 0, sizeof(struct sxe_dcb_config));
+
+	cfg->num_tcs.pg_tcs = dcb_max_tc;
+	cfg->num_tcs.pfc_tcs = dcb_max_tc;
+	for (i = 0; i < dcb_max_tc; i++) {
+		tc = &cfg->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_id = i;
+		tc->channel[DCB_PATH_TX].bwg_percent =
+				(u8)(100/dcb_max_tc + (i & 1));
+		tc->channel[DCB_PATH_RX].bwg_id = i;
+		tc->channel[DCB_PATH_RX].bwg_percent =
+				(u8)(100/dcb_max_tc + (i & 1));
+		tc->pfc_type = pfc_disabled;
+	}
+
+	/* By default every user priority (bit 0..7) lands on TC0. */
+	tc = &cfg->tc_config[0];
+	tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0xFF;
+	tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0xFF;
+	for (i = 0; i < MAX_BW_GROUP; i++) {
+		cfg->bwg_link_percent[DCB_PATH_TX][i] = 100;
+		cfg->bwg_link_percent[DCB_PATH_RX][i] = 100;
+	}
+	cfg->rx_pba_config = SXE_DCB_PBA_EQUAL;
+	cfg->pfc_mode_enable = false;
+	cfg->vmdq_active = true;
+	cfg->round_robin_enable = false;
+
+}
+
+/* Return the highest-numbered TC whose up_to_tc_bitmap contains user
+ * priority 'up' for the given direction. Searches downward from
+ * pg_tcs-1; returns 0 when no TC claims the priority (TC0 is the
+ * implicit default) or when pg_tcs is 0.
+ */
+static u8 sxe_dcb_get_tc_from_up(struct sxe_dcb_config *cfg,
+					u8 direction, u8 up)
+{
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+	u8 prio_mask = BIT(up);
+	u8 tc = cfg->num_tcs.pg_tcs;
+
+	if (!tc)
+		goto l_ret;
+
+	/* Deliberately stops at tc==0 without testing it: TC0 is the
+	 * fallback result either way.
+	 */
+	for (tc--; tc; tc--) {
+		if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap)
+			break;
+	}
+
+l_ret:
+	LOG_DEBUG("up[%u] to tc[%u]\n", up, tc);
+	return tc;
+}
+
+/* Build the user-priority -> TC map for one direction; 'map' must hold
+ * MAX_USER_PRIORITY entries.
+ */
+static void sxe_dcb_up2tc_map_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *map)
+{
+	u8 up;
+
+	for (up = 0; up < MAX_USER_PRIORITY; up++) {
+		map[up] = sxe_dcb_get_tc_from_up(cfg, direction, up);
+		LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", up, map[up]);
+	}
+
+}
+
+/* .priority_flow_ctrl_set ethdev op: program PFC (per-TC pause) for the
+ * TC that the requested priority maps to. Validates the high/low water
+ * marks against the TC's RX packet buffer size, then writes the flow
+ * control parameters and enables PFC on that TC.
+ * Returns 0 on success, -EINVAL on bad water marks, -EIO on HW failure.
+ * NOTE(review): pfc_conf->priority is used to index an 8-entry map;
+ * the ethdev layer is assumed to have range-checked it - confirm.
+ */
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+				struct rte_eth_pfc_conf *pfc_conf)
+{
+	s32 ret;
+	u32 rx_buf_size;
+	u32 max_high_water;
+	u8 tc_idx;
+	u8 up2tc_map[MAX_USER_PRIORITY] = { 0 };
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	/* Index order matches enum rte_eth_fc_mode. */
+	static const enum sxe_fc_mode fcmode[] = {
+		SXE_FC_NONE,
+		SXE_FC_RX_PAUSE,
+		SXE_FC_TX_PAUSE,
+		SXE_FC_FULL,
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, up2tc_map);
+	tc_idx = up2tc_map[pfc_conf->priority];
+	rx_buf_size = sxe_hw_rx_pkt_buf_size_get(hw, tc_idx);
+	PMD_LOG_DEBUG(INIT, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> SXE_RX_PKT_BUF_SIZE_SHIFT;
+	if ((pfc_conf->fc.high_water > max_high_water) ||
+		(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+		PMD_LOG_ERR(INIT, "Invalid high/low water setup value in KB, "
+			"high water=0x%x, low water=0x%x",
+			pfc_conf->fc.high_water, pfc_conf->fc.low_water);
+		PMD_LOG_ERR(INIT, "High_water must <= 0x%x", max_high_water);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sxe_hw_fc_requested_mode_set(hw, fcmode[pfc_conf->fc.mode]);
+	sxe_hw_fc_pause_time_set(hw, pfc_conf->fc.pause_time);
+	sxe_hw_fc_send_xon_set(hw, pfc_conf->fc.send_xon);
+	sxe_hw_fc_tc_low_water_mark_set(hw, tc_idx, pfc_conf->fc.low_water);
+	sxe_hw_fc_tc_high_water_mark_set(hw, tc_idx, pfc_conf->fc.high_water);
+
+	ret = sxe_pfc_enable(adapter, tc_idx);
+
+	/* "not negotiated" is treated as success, matching legacy PMDs. */
+	if ((ret == 0) || (ret == SXE_ERR_FC_NOT_NEGOTIATED)) {
+		PMD_LOG_DEBUG(INIT, "pfc set end ret = %d", ret);
+		ret = 0;
+		goto l_end;
+	}
+
+	PMD_LOG_ERR(INIT, "sxe_dcb_pfc_enable = 0x%x", ret);
+	ret = -EIO;
+l_end:
+	return ret;
+}
+
+/* .get_dcb_info ethdev op: report the current DCB setup - number of
+ * TCs, priority->TC map, per-TC queue ranges and TX bandwidth shares.
+ * The queue layouts for the non-VMDq 4-TC and 8-TC cases are fixed
+ * hardware mappings, hence the literal base/nb_queue tables below.
+ * Always returns 0.
+ */
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_dcb_config *dcb_config = &adapter->dcb_ctxt.config;
+
+	struct sxe_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	u8 tcs_num;
+	u8 i, j;
+
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
+		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+	else
+		dcb_info->nb_tcs = 1;
+
+	tc_queue = &dcb_info->tc_queue;
+	tcs_num = dcb_info->nb_tcs;
+
+	if (dcb_config->vmdq_active) {
+		/* VMDq+DCB: priority map comes from the VMDq RX config. */
+		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
+			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			/* With SR-IOV only the PF pool (pool 0) is reported:
+			 * one queue per TC.
+			 */
+			for (j = 0; j < tcs_num; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < tcs_num; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * tcs_num + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
+			}
+		}
+	} else {
+		struct rte_eth_dcb_rx_conf *rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
+			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+
+		/* Fixed HW queue layout: 4 TCs -> 32 RX queues each,
+		 * TX split 64/32/16/16.
+		 */
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 64;
+			dcb_info->tc_queue.tc_txq[0][2].base = 96;
+			dcb_info->tc_queue.tc_txq[0][3].base = 112;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
+			/* 8 TCs -> 16 RX queues each, TX split
+			 * 32/32/16/16/8/8/8/8.
+			 */
+			for (i = 0; i < dcb_info->nb_tcs; i++) {
+				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+			}
+
+			dcb_info->tc_queue.tc_txq[0][0].base = 0;
+			dcb_info->tc_queue.tc_txq[0][1].base = 32;
+			dcb_info->tc_queue.tc_txq[0][2].base = 64;
+			dcb_info->tc_queue.tc_txq[0][3].base = 80;
+			dcb_info->tc_queue.tc_txq[0][4].base = 96;
+			dcb_info->tc_queue.tc_txq[0][5].base = 104;
+			dcb_info->tc_queue.tc_txq[0][6].base = 112;
+			dcb_info->tc_queue.tc_txq[0][7].base = 120;
+			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+		}
+	}
+
+	for (i = 0; i < dcb_info->nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		dcb_info->tc_bws[i] = tc->channel[DCB_PATH_TX].bwg_percent;
+	}
+
+	return 0;
+}
+
+/* Translate the application's VMDq+DCB RX configuration into the driver
+ * DCB config: 16 pools -> 8 TCs, otherwise 4 TCs; rebuild the RX
+ * up->TC bitmaps from dcb_tc[].
+ */
+static void sxe_dcb_vmdq_rx_param_get(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	/* Clear all old RX mappings before re-deriving them. */
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_rx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+}
+
+/* Program the RX hardware for VMDq+DCB: multiqueue mode, default pool,
+ * UP->TC map, VLAN filtering and the per-entry VLAN->pool maps.
+ * Only 16 or 32 pools are supported; anything else falls back to
+ * disabling RSS and returning without VMDq setup.
+ */
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_dcb_conf *cfg;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	enum rte_eth_nb_pools pools_num;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	pools_num = cfg->nb_queue_pools;
+
+	if (pools_num != RTE_ETH_16_POOLS && pools_num != RTE_ETH_32_POOLS) {
+		sxe_rss_disable(dev);
+		return;
+	}
+
+	sxe_hw_dcb_vmdq_mq_configure(hw, pools_num);
+
+	sxe_hw_dcb_vmdq_default_pool_configure(hw,
+						cfg->enable_default_pool,
+						cfg->default_pool);
+
+	sxe_hw_dcb_vmdq_up_2_tc_configure(hw, cfg->dcb_tc);
+
+	sxe_hw_dcb_vmdq_vlan_configure(hw, pools_num);
+
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		sxe_hw_dcb_vmdq_pool_configure(hw,
+					i, cfg->pool_map[i].vlan_id,
+					cfg->pool_map[i].pools);
+	}
+
+}
+
+/* Translate the plain DCB RX configuration (no VMDq) into the driver
+ * DCB config: copy nb_tcs and rebuild the RX up->TC bitmaps.
+ */
+static void sxe_dcb_rx_param_get(struct rte_eth_dev *dev,
+		struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	dcb_config->num_tcs.pg_tcs = (u8)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)rx_conf->nb_tcs;
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = rx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_RX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+}
+
+/* Program the RX hardware for plain DCB (passes the VMDq/SR-IOV state
+ * so the HW layer can pick the right multiqueue layout).
+ */
+static void sxe_dcb_rx_hw_configure(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+	sxe_hw_dcb_rx_configure(hw, dcb_config->vmdq_active,
+				RTE_ETH_DEV_SRIOV(dev).active,
+				dcb_config->num_tcs.pg_tcs);
+}
+
+/* TX-side counterpart of sxe_dcb_vmdq_rx_param_get(): derive TC counts
+ * from the VMDq pool count and rebuild the TX up->TC bitmaps.
+ */
+static void sxe_dcb_vmdq_tx_param_get(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
+	}
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_tx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+}
+
+/* Program the TX hardware for VMDq+DCB: enable transmit for all
+ * configured pools starting at pool 0, then apply the DCB TX setup.
+ */
+static void sxe_dcb_vmdq_tx_hw_configure(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	sxe_hw_pool_xmit_enable(hw, 0, (u8)vmdq_tx_conf->nb_queue_pools);
+
+	sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
+					dcb_config->num_tcs.pg_tcs);
+}
+
+/* Translate the plain DCB TX configuration (no VMDq) into the driver
+ * DCB config: copy nb_tcs and rebuild the TX up->TC bitmaps.
+ */
+static void sxe_dcb_tx_param_get(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	struct sxe_tc_config *tc;
+	u8 i, j;
+
+	dcb_config->num_tcs.pg_tcs = (u8)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (u8)tx_conf->nb_tcs;
+
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0;
+	}
+
+	for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = tx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->channel[DCB_PATH_TX].up_to_tc_bitmap |=
+						(u8)(1 << i);
+	}
+
+}
+
+/* Minimum credits needed to forward a maximum-size frame: half the
+ * frame length expressed in 64-byte credit quanta, rounded up.
+ */
+static u32 sxe_dcb_min_credit_get(u32 max_frame)
+{
+
+	return ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+}
+
+/* Effective link share of one TC in one direction: the TC's bandwidth
+ * group's link percentage scaled by the TC's share within that group.
+ */
+static u16 sxe_dcb_cee_tc_link_percent_get(
+					struct sxe_dcb_config *cee_config,
+					u8 direction, u8 tc_index)
+{
+	u8 bw_percent;
+	u16 link_percentage;
+	struct sxe_tc_bw_alloc *tc_info;
+
+	tc_info = &cee_config->tc_config[tc_index].channel[direction];
+	link_percentage =
+		cee_config->bwg_link_percent[direction][tc_info->bwg_id];
+	bw_percent = tc_info->bwg_percent;
+
+	link_percentage = (link_percentage * bw_percent) / 100;
+
+	return link_percentage;
+}
+
+/* Smallest non-zero per-TC link percentage across all TCs; defaults to
+ * 100 when every TC reports zero, which keeps later divisions safe.
+ */
+static u32 sxe_dcb_cee_min_link_percent_get(
+			struct sxe_dcb_config *cee_config, u8 direction)
+{
+	u8 tc_index;
+	u16 link_percentage;
+	u32 min_link_percent = 100;
+
+	for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
+		link_percentage = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_index);
+
+		if (link_percentage && link_percentage < min_link_percent)
+			min_link_percent = link_percentage;
+	}
+
+	return min_link_percent;
+}
+
+/* Compute CEE refill and max credits for every TC in one direction and
+ * store them in the config. Credits are derived from the per-TC link
+ * percentage, clamped to [min_credit, MAX_CREDIT_REFILL/MAX_CREDIT] so
+ * even a low-share TC can still forward a max-size frame.
+ * Always returns 0 (kept s32 for interface symmetry with other calcs).
+ */
+static s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw,
+				struct sxe_dcb_config *cee_config,
+				u32 max_frame, u8 direction)
+{
+	s32 ret = 0;
+	struct sxe_adapter *adapter = hw->adapter;
+	struct sxe_tc_bw_alloc *tc_info;
+	u32 min_credit;
+	u32 total_credit;
+	u32 min_link_percent;
+	u32 credit_refill;
+	u32 credit_max;
+	u16 link_percentage;
+	u8 tc_index;
+
+	LOG_DEBUG_BDF("cee_config[%p] input max_frame[%u] direction[%s]\n",
+			cee_config, max_frame, direction ? "RX" : "TX");
+
+	min_credit = sxe_dcb_min_credit_get(max_frame);
+	LOG_DEBUG_BDF("cee_config[%p] max_frame[%u] got min_credit[%u]\n",
+				cee_config, max_frame, min_credit);
+
+	/* min_link_percent is >= 1 (helper defaults to 100), so this
+	 * division cannot fault.
+	 */
+	min_link_percent = sxe_dcb_cee_min_link_percent_get(cee_config, direction);
+	LOG_DEBUG_BDF("cee_config[%p] direction[%s] got min_link_percent[%u]\n",
+			cee_config, direction ? "RX" : "TX", min_link_percent);
+
+	total_credit = (min_credit / min_link_percent) + 1;
+	LOG_DEBUG_BDF("cee_config[%p] total_credit=%u\n", cee_config, total_credit);
+
+	for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) {
+		tc_info = &cee_config->tc_config[tc_index].channel[direction];
+
+		link_percentage = sxe_dcb_cee_tc_link_percent_get(
+					cee_config, direction, tc_index);
+		LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n",
+			tc_index, tc_info->bwg_percent, link_percentage);
+
+		/* A configured-but-rounded-to-zero TC still gets 1%. */
+		if (tc_info->bwg_percent > 0 && link_percentage == 0)
+			link_percentage = 1;
+
+		tc_info->link_percent = (u8)link_percentage;
+
+		credit_refill = min(link_percentage * total_credit,
+					(u32)MAX_CREDIT_REFILL);
+
+		if (credit_refill < min_credit)
+			credit_refill = min_credit;
+
+		tc_info->data_credits_refill = (u16)credit_refill;
+		LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n",
+				tc_index, credit_refill);
+
+		credit_max = (link_percentage * MAX_CREDIT) / 100;
+
+		if (credit_max < min_credit)
+			credit_max = min_credit;
+		LOG_DEBUG_BDF("tc[%u] credit_max=%u\n",
+				tc_index, credit_max);
+
+		/* Descriptor credits only apply to the TX direction. */
+		if (direction == DCB_PATH_TX)
+			cee_config->tc_config[tc_index].desc_credits_max =
+				(u16)credit_max;
+
+		tc_info->data_credits_max = (u16)credit_max;
+	}
+
+	return ret;
+}
+
+/* Gather per-TC refill credits for one direction into 'refill'
+ * (MAX_TRAFFIC_CLASS entries).
+ */
+static void sxe_dcb_cee_refill_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u16 *refill)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		refill[tc] = tc_config[tc].channel[direction].data_credits_refill;
+		LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]);
+	}
+
+}
+
+/* Gather per-TC max descriptor credits into 'max_credits'
+ * (MAX_TRAFFIC_CLASS entries; TX-only values, see credits calculate).
+ */
+static void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_config *cfg,
+					u16 *max_credits)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		max_credits[tc] = tc_config[tc].desc_credits_max;
+		LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]);
+	}
+
+}
+
+/* Gather per-TC bandwidth-group ids for one direction into 'bwgid'. */
+static void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *bwgid)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		bwgid[tc] = tc_config[tc].channel[direction].bwg_id;
+		LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]);
+	}
+
+}
+
+/* Gather per-TC strict-priority types for one direction into 'ptype'. */
+static void sxe_dcb_cee_prio_parse(struct sxe_dcb_config *cfg,
+					u8 direction, u8 *ptype)
+{
+	u32 tc;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+		ptype[tc] = tc_config[tc].channel[direction].prio_type;
+		LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]);
+	}
+
+}
+
+/* Build the PFC enable bitmask: bit 'up' is set when the TC that user
+ * priority 'up' maps to (via 'map') has PFC enabled in any mode.
+ */
+static void sxe_dcb_cee_pfc_parse(struct sxe_dcb_config *cfg,
+					u8 *map, u8 *pfc_en)
+{
+	u32 up;
+	struct sxe_tc_config *tc_config = &cfg->tc_config[0];
+
+	for (*pfc_en = 0, up = 0; up < MAX_TRAFFIC_CLASS; up++) {
+		if (tc_config[map[up]].pfc_type != pfc_disabled)
+			*pfc_en |= BIT(up);
+	}
+	LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en);
+
+}
+
+/* Configure per-TC statistics mapping in hardware.
+ * Valid combinations are 8 TCs without VMDq, or 4 TCs (with or without
+ * VMDq); anything else is rejected with -SXE_ERR_PARAM.
+ * A NULL dcb_config falls back to the 8-TC / no-VMDq default.
+ * Returns 0 on success.
+ *
+ * Fix: 'ret' was previously uninitialized and returned as-is on the
+ * success path (undefined behavior); it is now initialized to 0.
+ */
+static s32 sxe_dcb_tc_stats_configure(struct sxe_hw *hw,
+				struct sxe_dcb_config *dcb_config)
+{
+	s32 ret = 0;
+	u8 tc_count = 8;
+	bool vmdq_active = false;
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vmdq_active = dcb_config->vmdq_active;
+	}
+
+	if (!((tc_count == 8 && vmdq_active == false) || tc_count == 4)) {
+		ret = -SXE_ERR_PARAM;
+		PMD_LOG_ERR(INIT, "dcb tc stats configure failed, "
+			"tc_num = %u, vmdq_active = %s",
+			tc_count, vmdq_active ? "on" : "off");
+		goto l_end;
+	}
+
+	sxe_hw_dcb_tc_stats_configure(hw, tc_count, vmdq_active);
+
+l_end:
+	return ret;
+}
+
+/* Dispatch RX DCB setup on the configured RX multiqueue mode; sets
+ * *rx_configed to DCB_RX_CONFIG when a supported mode was handled.
+ */
+static void sxe_dcb_rx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *rx_configed)
+{
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case RTE_ETH_MQ_RX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_vmdq_rx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_rx_hw_configure(dev);
+		break;
+	case RTE_ETH_MQ_RX_DCB:
+	case RTE_ETH_MQ_RX_DCB_RSS:
+		dcb_config->vmdq_active = false;
+		*rx_configed = DCB_RX_CONFIG;
+
+		sxe_dcb_rx_param_get(dev, dcb_config);
+		sxe_dcb_rx_hw_configure(dev, dcb_config);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB RX mode configuration");
+		break;
+	}
+
+}
+
+/* Dispatch TX DCB setup on the configured TX multiqueue mode; sets
+ * *tx_configed to DCB_TX_CONFIG when a supported mode was handled.
+ */
+static void sxe_dcb_tx_mq_mode_configure(struct rte_eth_dev *dev,
+					struct sxe_dcb_config *dcb_config,
+					u8 *tx_configed)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case RTE_ETH_MQ_TX_VMDQ_DCB:
+		dcb_config->vmdq_active = true;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_vmdq_tx_param_get(dev, dcb_config);
+		sxe_dcb_vmdq_tx_hw_configure(dev, dcb_config);
+		break;
+
+	case RTE_ETH_MQ_TX_DCB:
+		dcb_config->vmdq_active = false;
+		*tx_configed = DCB_TX_CONFIG;
+
+		sxe_dcb_tx_param_get(dev, dcb_config);
+		sxe_hw_dcb_tx_configure(hw, dcb_config->vmdq_active,
+					dcb_config->num_tcs.pg_tcs);
+		break;
+	default:
+		PMD_LOG_ERR(INIT, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+}
+
+/* (Re)distribute bandwidth-group percentages over the active TCs.
+ * In the 4-TC case, priorities mapped to non-existent TCs are forced to
+ * TC0 and unused TCs get 0%. TX percentages set earlier through
+ * rte_pmd_sxe_tc_bw_set() (bw_conf->tc_num == nb_tcs) are preserved;
+ * otherwise an equal split is applied (with the +1 alternation in the
+ * 8-TC case so shares total 100).
+ */
+static void sxe_dcb_bwg_percentage_alloc(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	u8 i;
+	struct sxe_tc_config *tc;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_bw_config *bw_conf = &adapter->dcb_ctxt.bw_config;
+
+	u8 nb_tcs = dcb_config->num_tcs.pfc_tcs;
+
+	if (nb_tcs == RTE_ETH_4_TCS) {
+		for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+			if (map[i] >= nb_tcs) {
+				PMD_LOG_INFO(DRV, "map[up-%u] to tc[%u] not exist, "
+					"change to tc 0", i, map[i]);
+				map[i] = 0;
+			}
+		}
+
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs);
+			}
+			tc->channel[DCB_PATH_RX].bwg_percent =
+						(u8)(100 / nb_tcs);
+		}
+		for (; i < MAX_TRAFFIC_CLASS; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->channel[DCB_PATH_TX].bwg_percent = 0;
+			tc->channel[DCB_PATH_RX].bwg_percent = 0;
+		}
+	} else {
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs) {
+				tc->channel[DCB_PATH_TX].bwg_percent =
+					(u8)(100 / nb_tcs + (i & 1));
+			}
+
+			tc->channel[DCB_PATH_RX].bwg_percent =
+				(u8)(100 / nb_tcs + (i & 1));
+		}
+	}
+
+}
+
+/* Split the RX packet buffer evenly across the active TCs and zero the
+ * remaining buffer slots.
+ */
+static void sxe_dcb_rx_pkt_buf_configure(struct sxe_hw *hw,
+					u16 rx_buffer_size, u8 tcs_num)
+{
+	u8 i;
+	u16 pbsize;
+
+	pbsize = (u16)(rx_buffer_size / tcs_num);
+
+	for (i = 0; i < tcs_num; i++)
+		sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize);
+
+	for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
+		sxe_hw_rx_pkt_buf_size_set(hw, i, 0);
+
+}
+
+/* Reconfigure TX packet buffer sizes/thresholds for 'tcs_num' TCs.
+ * The buffer is switched off around the update and re-enabled after.
+ */
+static void sxe_dcb_tx_pkt_buf_configure(struct sxe_hw *hw, u8 tcs_num)
+{
+	sxe_hw_tx_pkt_buf_switch(hw, false);
+
+	sxe_hw_tx_pkt_buf_size_configure(hw, tcs_num);
+	sxe_hw_tx_pkt_buf_thresh_configure(hw, tcs_num, true);
+
+	sxe_hw_tx_pkt_buf_switch(hw, true);
+}
+
+/* Apply the computed RX DCB state to hardware: packet-buffer split
+ * plus bandwidth allocation (refill/max credits, group ids, priority
+ * types and the UP->TC map).
+ */
+static void sxe_dcb_rx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_rx_pkt_buf_configure(hw, SXE_RX_PKT_BUF_SIZE, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_RX, refill);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_RX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_RX, tsa);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+
+	sxe_hw_dcb_rx_bw_alloc_configure(hw, refill, max,
+				bwgid, tsa, map, MAX_USER_PRIORITY);
+}
+
+/* Apply the computed TX DCB state to hardware: packet-buffer split
+ * plus descriptor- and data-plane bandwidth allocation.
+ */
+static void sxe_dcb_tx_configure(struct rte_eth_dev *dev,
+			struct sxe_dcb_config *dcb_config, u8 *map)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u8 tsa[MAX_TRAFFIC_CLASS]     = {0};
+	u8 bwgid[MAX_TRAFFIC_CLASS]   = {0};
+	u16 refill[MAX_TRAFFIC_CLASS] = {0};
+	u16 max[MAX_TRAFFIC_CLASS]    = {0};
+
+	sxe_dcb_tx_pkt_buf_configure(hw, dcb_config->num_tcs.pg_tcs);
+
+	sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_TX, refill);
+	sxe_dcb_cee_max_credits_parse(dcb_config, max);
+	sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_TX, bwgid);
+	sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_TX, tsa);
+
+	sxe_hw_dcb_tx_desc_bw_alloc_configure(hw, refill, max, bwgid, tsa);
+	sxe_hw_dcb_tx_data_bw_alloc_configure(hw, refill, max,
+					bwgid, tsa, map, MAX_USER_PRIORITY);
+
+}
+
+/* Enable PFC for the active TCs: set per-TC high/low water marks at
+ * 3/4 and 1/4 of each TC's share of the RX buffer, mark every TC as
+ * fully PFC-enabled, then program the enable mask (restricted to the
+ * low 4 bits in the 4-TC case).
+ */
+static void sxe_dcb_pfc_configure(struct sxe_hw *hw,
+					struct sxe_dcb_config *dcb_config,
+					u8 *map)
+{
+	u8 nb_tcs = dcb_config->num_tcs.pg_tcs;
+	u16 pbsize;
+	u8 i, pfc_en;
+	struct sxe_tc_config *tc;
+
+	pbsize = (u16)(SXE_RX_PKT_BUF_SIZE / nb_tcs);
+	for (i = 0; i < nb_tcs; i++) {
+		sxe_hw_fc_tc_high_water_mark_set(hw, i, (pbsize * 3) / 4);
+		sxe_hw_fc_tc_low_water_mark_set(hw, i, pbsize / 4);
+
+		tc = &dcb_config->tc_config[i];
+		tc->pfc_type = pfc_enabled_full;
+	}
+
+	sxe_dcb_cee_pfc_parse(dcb_config, map, &pfc_en);
+	if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
+		pfc_en &= 0x0F;
+
+	sxe_hw_dcb_pfc_configure(hw, pfc_en, map, MAX_USER_PRIORITY);
+
+}
+
+/* Top-level DCB programming sequence: derive parameters from the RX/TX
+ * multiqueue modes, build the UP->TC map, distribute bandwidth, compute
+ * CEE credits for the current max frame size, then push RX/TX, stats
+ * and (if requested) PFC configuration to hardware.
+ */
+static void sxe_dcb_hw_configure(struct rte_eth_dev *dev,
+				struct sxe_dcb_config *dcb_config)
+{
+	u8 rx_configed = 0;
+	u8 tx_configed = 0;
+	u8 map[MAX_TRAFFIC_CLASS] = {0};
+	u32 max_frame = dev->data->mtu + SXE_ETH_DEAD_LOAD;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_dcb_rx_mq_mode_configure(dev, dcb_config, &rx_configed);
+	sxe_dcb_tx_mq_mode_configure(dev, dcb_config, &tx_configed);
+
+	sxe_dcb_up2tc_map_parse(dcb_config, DCB_PATH_RX, map);
+
+	sxe_dcb_bwg_percentage_alloc(dev, dcb_config, map);
+
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_TX);
+	sxe_dcb_cee_tc_credits_calculate(hw, dcb_config, max_frame, DCB_PATH_RX);
+
+	if (rx_configed)
+		sxe_dcb_rx_configure(dev, dcb_config, map);
+
+	if (tx_configed)
+		sxe_dcb_tx_configure(dev, dcb_config, map);
+
+	sxe_dcb_tc_stats_configure(hw, dcb_config);
+
+	if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT)
+		sxe_dcb_pfc_configure(hw, dcb_config, map);
+
+}
+
+/* Entry point called at device start: validate that a DCB-capable RX
+ * mode is configured and that the RX queue count fits, then run the
+ * full hardware configuration. Silently returns (with an info log)
+ * when DCB is not applicable.
+ */
+void sxe_dcb_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	struct sxe_dcb_config *dcb_cfg = &adapter->dcb_ctxt.config;
+	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+	PMD_INIT_FUNC_TRACE();
+
+	if ((dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB) &&
+		(dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)) {
+		PMD_LOG_INFO(INIT, "dcb config failed, cause mq_mode=0x%x",
+				(u8)dev_conf->rxmode.mq_mode);
+		return;
+	}
+
+	if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES) {
+		PMD_LOG_INFO(INIT, "dcb config failed, cause nb_rx_queues=%u > %u",
+			dev->data->nb_rx_queues, RTE_ETH_DCB_NUM_QUEUES);
+		return;
+	}
+
+	sxe_dcb_hw_configure(dev, dcb_cfg);
+
+}
+
+/* Public PMD API: set the TX bandwidth weight of each enabled TC.
+ * 'bw_weight' must hold tc_num entries summing to 100; tc_num must
+ * match the TC count implied by the current TX multiqueue mode.
+ * Unused TCs are reset to 0%. The weights take effect at the next
+ * DCB (re)configuration via bw_conf->tc_num.
+ * Returns 0, or -ENODEV/-ENOTSUP/-EINVAL.
+ * NOTE(review): 'port' is u8 here (ports >= 256 unreachable) while the
+ * rest of the public API uses u16 - flagged in rte_pmd_sxe.h as well.
+ */
+s32 rte_pmd_sxe_tc_bw_set(u8 port,
+				u8 tc_num, u8 *bw_weight)
+{
+	struct sxe_adapter *adapter;
+	struct rte_eth_dev *dev;
+	struct sxe_dcb_config *dcb_config;
+	struct sxe_tc_config *tc;
+	struct rte_eth_conf *eth_conf;
+	struct sxe_bw_config *bw_conf;
+	u8 i;
+	u8 nb_tcs;
+	u16 sum;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		goto l_end;
+	}
+
+	if (tc_num > MAX_TRAFFIC_CLASS) {
+		PMD_LOG_ERR(DRV, "TCs should be no more than %d.",
+			MAX_TRAFFIC_CLASS);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	adapter = dev->data->dev_private;
+	dcb_config = &adapter->dcb_ctxt.config;
+	bw_conf = &adapter->dcb_ctxt.bw_config;
+	eth_conf = &dev->data->dev_conf;
+
+	/* Derive the enabled TC count from the TX multiqueue mode. */
+	if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+			RTE_ETH_32_POOLS) {
+			nb_tcs = RTE_ETH_4_TCS;
+		} else {
+			nb_tcs = RTE_ETH_8_TCS;
+		}
+	} else {
+		nb_tcs = 1;
+	}
+
+	if (nb_tcs != tc_num) {
+		PMD_LOG_ERR(DRV,
+			"Weight should be set for all %d enabled TCs.",
+			nb_tcs);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	sum = 0;
+	for (i = 0; i < nb_tcs; i++)
+		sum += bw_weight[i];
+
+	if (sum != 100) {
+		PMD_LOG_ERR(DRV,
+			"The summary of the TC weight should be 100.");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	for (i = 0; i < nb_tcs; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = bw_weight[i];
+	}
+
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->channel[DCB_PATH_TX].bwg_percent = 0;
+	}
+
+	bw_conf->tc_num = nb_tcs;
+
+l_end:
+	return ret;
+}
new file mode 100644
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_DCB_H__
+#define __SXE_DCB_H__
+#include <stdbool.h>
+
+/* Packet-buffer allocation strategies. */
+#define PBA_STRATEGY_EQUAL      (0)
+#define PBA_STRATEGY_WEIGHTED   (1)
+#define MAX_BW_GROUP            8
+#define MAX_USER_PRIORITY       8
+#define SXE_DCB_MAX_TRAFFIC_CLASS        8
+
+/* Strict-priority type of a TC within its bandwidth group. */
+enum sxe_dcb_strict_prio_type {
+	DCB_PRIO_NONE = 0,
+	DCB_PRIO_GROUP,
+	DCB_PRIO_LINK
+};
+
+/* Direction indices used throughout the DCB code. */
+enum {
+	DCB_PATH_TX   = 0,
+	DCB_PATH_RX   = 1,
+	DCB_PATH_NUM  = DCB_PATH_RX + 1,
+};
+
+/* Transmission selection algorithm (CEE). */
+enum sxe_dcb_tsa {
+	sxe_dcb_tsa_ets = 0,
+	sxe_dcb_tsa_group_strict_cee,
+	sxe_dcb_tsa_strict
+};
+
+enum sxe_dcb_pba_config {
+	SXE_DCB_PBA_EQUAL = PBA_STRATEGY_EQUAL,
+	SXE_DCB_PBA_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+/* TC counts: bandwidth-group TCs and PFC-capable TCs. */
+struct sxe_dcb_num_tcs {
+	u8 pg_tcs;
+	u8 pfc_tcs;
+};
+
+/* Per-TC, per-direction bandwidth allocation state. */
+struct sxe_tc_bw_alloc {
+	u8 bwg_id;                 /* bandwidth group this TC belongs to */
+	u8 bwg_percent;            /* TC share within its group (0-100) */
+	u8 link_percent;           /* computed effective link share */
+	u8 up_to_tc_bitmap;        /* user priorities mapped to this TC */
+	u16 data_credits_refill;   /* CEE refill credits */
+	u16 data_credits_max;      /* CEE max credits */
+	enum sxe_dcb_strict_prio_type prio_type;
+};
+
+enum sxe_dcb_pfc_type {
+	pfc_disabled = 0,
+	pfc_enabled_full,
+	pfc_enabled_tx,
+	pfc_enabled_rx
+};
+
+struct sxe_tc_config {
+	struct sxe_tc_bw_alloc channel[DCB_PATH_NUM];
+	enum sxe_dcb_pfc_type  pfc_type;
+
+	u16 desc_credits_max;   /* TX descriptor credit cap */
+	u8 tc;
+};
+
+/* Full CEE DCB configuration for one port. */
+struct sxe_dcb_config {
+	struct sxe_tc_config tc_config[SXE_DCB_MAX_TRAFFIC_CLASS];
+	struct sxe_dcb_num_tcs num_tcs;
+	u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP];
+	bool pfc_mode_enable;
+	bool round_robin_enable;
+
+	enum sxe_dcb_pba_config rx_pba_config;
+	bool vmdq_active;
+};
+
+/* User-requested TX bandwidth weights (see rte_pmd_sxe_tc_bw_set). */
+struct sxe_bw_config {
+	u8 tc_num;
+};
+
+struct sxe_dcb_context {
+	struct sxe_dcb_config config;
+	struct sxe_bw_config bw_config;
+};
+
+void sxe_dcb_init(struct rte_eth_dev *dev);
+
+s32 sxe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+				struct rte_eth_pfc_conf *pfc_conf);
+
+s32 sxe_get_dcb_info(struct rte_eth_dev *dev,
+			struct rte_eth_dcb_info *dcb_info);
+
+void sxe_dcb_configure(struct rte_eth_dev *dev);
+
+void sxe_dcb_vmdq_rx_hw_configure(struct rte_eth_dev *dev);
+
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_bus_pci.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#elif defined DPDK_21_11_5
+#include <rte_bus_pci.h>
+#include <ethdev_driver.h>
+#include <rte_dev.h>
+#include <ethdev_pci.h>
+#else
+#include <bus_pci_driver.h>
+#include <ethdev_driver.h>
+#include <dev_driver.h>
+#include <ethdev_pci.h>
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_pmd_sxe.h>
+#include <rte_alarm.h>
+
+#include "sxe_types.h"
+#include "sxe_logs.h"
+#include "sxe_compat_platform.h"
+#include "sxe_errno.h"
+#include "sxe.h"
+#include "sxe_hw.h"
+#include "sxe_ethdev.h"
+#include "sxe_filter.h"
+#include "sxe_rx.h"
+#include "sxe_tx.h"
+#include "sxe_offload.h"
+#include "sxe_queue.h"
+#include "sxe_irq.h"
+#include "sxe_stats.h"
+#include "sxe_phy.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_flow_ctrl.h"
+#include "sxe_ptp.h"
+#include "sxe_cli.h"
+#include "drv_msg.h"
+#include "sxe_vf.h"
+#include "sxe_dcb.h"
+#include "sxe_version.h"
+#include "sxe_compat_version.h"
+#include <rte_string_fns.h>
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+#include "sxe_tm.h"
+#endif
+
+/* Classic Ethernet framing constants used for MTU/frame-size math. */
+#define SXE_DEFAULT_MTU 1500
+#define SXE_ETH_HLEN 14
+#define SXE_ETH_FCS_LEN 4
+#define SXE_ETH_FRAME_LEN 1514
+
+/* Largest frame that is still "standard" (non-jumbo) size. */
+#define SXE_ETH_MAX_LEN (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
+
+/* Rx descriptor ring limits advertised through dev_infos_get(). */
+static const struct rte_eth_desc_lim sxe_rx_desc_lim = {
+	.nb_max = SXE_MAX_RING_DESC,
+	.nb_min = SXE_MIN_RING_DESC,
+	.nb_align = SXE_RX_DESC_RING_ALIGN,
+};
+
+/* Tx descriptor ring limits, including per-packet segment caps. */
+static const struct rte_eth_desc_lim sxe_tx_desc_lim = {
+	.nb_max = SXE_MAX_RING_DESC,
+	.nb_min = SXE_MIN_RING_DESC,
+	.nb_align = SXE_TX_DESC_RING_ALIGN,
+	.nb_seg_max = SXE_TX_MAX_SEG,
+	.nb_mtu_seg_max = SXE_TX_MAX_SEG,
+};
+
+static s32 sxe_dev_reset(struct rte_eth_dev *eth_dev);
+
+/* Validate the requested port configuration and prime adapter defaults. */
+static s32 sxe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	s32 ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* RSS requested as mq mode implies the RSS hash offload. */
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		PMD_LOG_DEBUG(INIT, "rx offload rss hash");
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+
+	/* Reject unsupported multi-queue mode combinations early. */
+	ret = sxe_mq_mode_check(dev);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "sxe mq mode check fails with %d.",
+				ret);
+		return ret;
+	}
+
+	irq->action |= SXE_IRQ_LINK_UPDATE;
+
+	/* Bulk mbuf allocation is the default Rx strategy. */
+	adapter->rx_batch_alloc_allowed = true;
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+	adapter->rx_vec_allowed = true;
+#endif
+
+	return ret;
+}
+
+/* Open the datapath: enable Rx capture, then the MAC Tx/Rx units. */
+static void sxe_txrx_start(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	sxe_hw_rx_cap_switch_on(hw);
+	sxe_hw_mac_txrx_enable(hw);
+}
+
+/* Bring the link up according to the user-configured speed set. */
+static s32 sxe_link_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 conf_speeds;
+	s32 ret;
+
+	/* Make sure loopback is off before touching the link. */
+	sxe_hw_loopback_switch(hw, false);
+
+	sxe_sfp_tx_laser_enable(adapter);
+
+	/* Report link down until the next link update refreshes it. */
+	dev->data->dev_link.link_status = false;
+
+	/* Translate the user speed configuration into a speed mask. */
+	ret = sxe_conf_speed_get(dev, &conf_speeds);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "invalid link setting");
+		return ret;
+	}
+
+	/* Multi-speed fiber modules need the speed-walk setup path. */
+	ret = adapter->phy_ctxt.sfp_info.multispeed_fiber ?
+		sxe_multispeed_sfp_link_configure(dev, conf_speeds, false) :
+		sxe_sfp_link_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "link config failed, speed=%x",
+				conf_speeds);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Ask the firmware to initialize the PCS/SerDes in the given mode. */
+static s32 sxe_loopback_pcs_init(struct sxe_adapter *adapter,
+				sxe_pcs_mode_e mode, u32 max_frame)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+	sxe_pcs_cfg_s pcs_cfg;
+	s32 ret;
+
+	pcs_cfg.mode = mode;
+	pcs_cfg.mtu = max_frame;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_PCS_SDS_INIT,
+				(void *)&pcs_cfg, sizeof(pcs_cfg),
+				NULL, 0);
+	/* The pending PCS-init request is consumed regardless of outcome. */
+	irq->to_pcs_init = false;
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret);
+		return ret;
+	}
+
+	/* The PCS reset clears the flow-control MAC address; restore it. */
+	sxe_fc_mac_addr_set(adapter);
+
+	LOG_INFO_BDF("mode:%u max_frame:0x%x loopback pcs init done.\n",
+			mode, max_frame);
+	return 0;
+}
+
+/* Configure PHY Tx->Rx loopback: a normal KR bring-up followed by a
+ * switch into loopback mode, with the SFP laser held off throughout.
+ */
+static s32 sxe_loopback_configure(struct sxe_adapter *adapter)
+{
+	s32 ret;
+	u32 max_frame = SXE_DEFAULT_MTU + SXE_ETH_DEAD_LOAD;
+
+	(void)sxe_sfp_tx_laser_disable(adapter);
+
+	/* First bring the SerDes/PCS up in the normal 10GBASE-KR mode. */
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+				SXE_PCS_MODE_10GBASE_KR_WO, ret);
+		goto l_out;
+	}
+
+	/* Then switch it into PHY Tx-to-Rx loopback. */
+	ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_LPBK_PHY_TX2RX, max_frame);
+	if (ret) {
+		LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n",
+				SXE_PCS_MODE_LPBK_PHY_TX2RX, ret);
+		goto l_out;
+	}
+
+	/* Give the PCS time to settle before traffic starts. */
+	usleep_range(10000, 20000);
+
+	/* Fixed typo in the log message ("loolback" -> "loopback"). */
+	LOG_DEBUG_BDF("loopback configure success max_frame:0x%x.", max_frame);
+
+l_out:
+	return ret;
+}
+
+/* Start the port: reset and program the hardware, configure queues,
+ * interrupts and the link, then open the datapath. The ordering of the
+ * steps below follows the hardware bring-up sequence and must not be
+ * rearranged.
+ */
+static s32 sxe_dev_start(struct rte_eth_dev *dev)
+{
+	s32 ret;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *handle = SXE_PCI_INTR_HANDLE(pci_dev);
+	struct sxe_irq_context *irq = &adapter->irq_ctxt;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	struct sxe_macsec_context *macsec_ctxt = &adapter->macsec_ctxt;
+#endif
+
+	/* NOTE(review): this return value is overwritten by sxe_phy_init()
+	 * below without being checked — best-effort time sync, presumably.
+	 */
+	ret = sxe_fw_time_sync(hw);
+
+	sxe_wait_setup_link_complete(dev, 0);
+
+	/* Interrupts stay masked until the queues are configured. */
+	rte_intr_disable(handle);
+
+	adapter->is_stopped = false;
+
+	ret = sxe_phy_init(adapter);
+	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
+		/* Only SFP+ modules are supported; anything else is fatal. */
+		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
+		ret = -EPERM;
+		goto l_end;
+	} else if (ret) {
+		/* Other phy errors are logged but do not abort the start. */
+		PMD_LOG_ERR(INIT, "phy init failed, ret=%d", ret);
+	}
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	sxe_hw_start(hw);
+
+	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+
+	/* Configure virtualization */
+	sxe_vt_configure(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	if (SXE_DEV_FNAV_CONF(dev)->mode != RTE_FDIR_MODE_NONE) {
+		ret = sxe_fnav_filter_configure(dev);
+		if (ret) {
+			PMD_LOG_ERR(INIT, "fnav config fail.");
+			goto l_end;
+		}
+	}
+#endif
+
+	sxe_tx_configure(dev);
+
+	ret = sxe_rx_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "unable to initialize RX hardware");
+		goto l_error;
+	}
+
+	ret = sxe_irq_configure(dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "irq config fail.");
+		goto l_error;
+	}
+
+	sxe_vlan_filter_configure(dev);
+
+	sxe_queue_stats_map_restore(dev);
+
+	sxe_txrx_start(dev);
+
+	irq->to_pcs_init = true;
+
+	/* Normal link bring-up vs. Tx->Rx loopback, per configuration. */
+	if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_DISABLED) {
+		sxe_link_configure(dev);
+	} else if (dev->data->dev_conf.lpbk_mode == SXE_LPBK_ENABLED) {
+		sxe_loopback_configure(adapter);
+	} else {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(INIT, "unsupport loopback mode:%u.",
+				dev->data->dev_conf.lpbk_mode);
+		goto l_end;
+	}
+
+	sxe_link_update(dev, false);
+
+	ret = sxe_flow_ctrl_enable(dev);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "enable flow ctrl err");
+		goto l_error;
+	}
+
+	sxe_dcb_configure(dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC
+	if (macsec_ctxt->offload_en)
+		sxe_macsec_enable(dev, macsec_ctxt);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	sxe_filter_restore(dev);
+#endif
+
+l_end:
+	return ret;
+
+l_error:
+	/* Roll back queue/irq resources allocated earlier in this call. */
+	PMD_LOG_ERR(INIT, "dev start err, ret=%d", ret);
+	sxe_irq_vec_free(handle);
+	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+	ret = -EIO;
+	goto l_end;
+}
+
+/* Stop the port: mask interrupts, reset the hardware and tear down the
+ * queues. On DPDK 19.11 the ethdev .dev_stop callback returns void, so
+ * the signature and the tail of the function are #ifdef'd.
+ */
+#ifdef DPDK_19_11_6
+static void sxe_dev_stop(struct rte_eth_dev *dev)
+#else
+static s32 sxe_dev_stop(struct rte_eth_dev *dev)
+#endif
+{
+	s32 ret = 0;
+	s32 num;
+	struct rte_eth_link link;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Stopping twice is a no-op. */
+	if (adapter->is_stopped) {
+		LOG_ERROR("adapter[%p] is stopped", adapter);
+		goto l_end;
+	}
+
+	sxe_hw_all_irq_disable(hw);
+
+	sxe_sfp_tx_laser_disable(adapter);
+
+	/* Wait for any in-flight link setup thread to finish. */
+	sxe_wait_setup_link_complete(dev, 0);
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_end;
+	}
+
+	/* Re-program the MAC address lost by the reset. */
+	sxe_mac_addr_set(dev, &dev->data->mac_addrs[0]);
+
+	sxe_irq_disable(dev);
+
+	sxe_txrx_queues_clear(dev, adapter->rx_batch_alloc_allowed);
+
+	dev->data->scattered_rx = 0;
+	dev->data->lro = 0;
+
+	/* Publish link-down to callers of rte_eth_link_get(). */
+	memset(&link, 0, sizeof(link));
+	rte_eth_linkstatus_set(dev, &link);
+
+	adapter->rss_reta_updated = false;
+
+	dev->data->dev_started = 0;
+	adapter->is_stopped = true;
+
+	/* If a delayed irq handler was still pending, deliver the LSC
+	 * event it would have produced.
+	 */
+	num = rte_eal_alarm_cancel(sxe_event_irq_delayed_handler, dev);
+	if (num > 0)
+		sxe_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+
+	LOG_DEBUG_BDF("dev stop success.");
+
+l_end:
+#ifdef DPDK_19_11_6
+	LOG_DEBUG_BDF("at end of dev stop.");
+#else
+	return ret;
+#endif
+}
+
+/* Close the port: stop it, release all queue/irq/filter resources and
+ * hand the device back to firmware control. Returns void on DPDK 19.11.
+ */
+#ifdef DPDK_19_11_6
+static void sxe_dev_close(struct rte_eth_dev *dev)
+#else
+static s32 sxe_dev_close(struct rte_eth_dev *dev)
+#endif
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Only the primary process owns the hardware. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_INFO(INIT, "not primary, do nothing");
+		goto l_end;
+	}
+
+	/* Tell firmware the driver is going away. */
+	sxe_hw_hdc_drv_status_set(hw, (u32)false);
+
+	ret = sxe_hw_reset(hw);
+	if (ret < 0) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_end;
+	}
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+#endif
+
+#ifdef DPDK_19_11_6
+	sxe_dev_stop(dev);
+#else
+	ret = sxe_dev_stop(dev);
+	if (ret)
+		PMD_LOG_ERR(INIT, "dev stop fail.(err:%d)", ret);
+#endif
+
+	sxe_queues_free(dev);
+
+	/* Restore the permanent MAC address before releasing the port. */
+	sxe_mac_addr_set(dev, &adapter->mac_filter_ctxt.def_mac_addr);
+	sxe_irq_uninit(dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_vt_uninit(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	sxe_fnav_filter_uninit(dev);
+	sxe_fivetuple_filter_uninit(dev);
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	sxe_tm_ctxt_uninit(dev);
+#endif
+
+l_end:
+#ifdef DPDK_19_11_6
+	LOG_DEBUG_BDF("at end of dev close.");
+#else
+	return ret;
+#endif
+}
+
+/* Report device capabilities and recommended defaults to the ethdev
+ * layer (queue counts, offload capabilities, descriptor limits, RSS
+ * geometry and preferred ring/burst sizes).
+ */
+static s32 sxe_dev_infos_get(struct rte_eth_dev *dev,
+				struct rte_eth_dev_info *dev_info)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	dev_info->max_rx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	dev_info->max_tx_queues = SXE_HW_TXRX_RING_NUM_MAX;
+	/* Without SR-IOV and without a Tx mq mode, fewer Tx queues apply. */
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_NONE)
+			dev_info->max_tx_queues = SXE_HW_TX_NONE_MODE_Q_NUM;
+	}
+
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->max_rx_pktlen = 15872;
+	dev_info->max_mac_addrs = SXE_UC_ENTRY_NUM_MAX;
+	dev_info->max_hash_mac_addrs = SXE_HASH_UC_NUM_MAX;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_mtu = dev_info->max_rx_pktlen - SXE_ETH_OVERHEAD;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
+	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+
+	/* Port-level capa includes everything the queues can do. */
+	dev_info->rx_queue_offload_capa = sxe_rx_queue_offload_capa_get(dev);
+	dev_info->rx_offload_capa = (sxe_rx_port_offload_capa_get(dev) |
+					dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = sxe_tx_queue_offload_capa_get(dev);
+	dev_info->tx_offload_capa = sxe_tx_port_offload_capa_get(dev);
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = SXE_DEFAULT_RX_PTHRESH,
+			.hthresh = SXE_DEFAULT_RX_HTHRESH,
+			.wthresh = SXE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = SXE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = SXE_DEFAULT_TX_PTHRESH,
+			.hthresh = SXE_DEFAULT_TX_HTHRESH,
+			.wthresh = SXE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = SXE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = SXE_DEFAULT_TX_RSBIT_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = sxe_rx_desc_lim;
+	dev_info->tx_desc_lim = sxe_tx_desc_lim;
+
+	dev_info->hash_key_size = SXE_HKEY_MAX_INDEX * sizeof(u32);
+	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = SXE_RSS_OFFLOAD_ALL;
+
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+
+	/* Preferred defaults picked up by applications via rte_eth_dev_info. */
+	dev_info->default_rxportconf.burst_size = 32;
+	dev_info->default_txportconf.burst_size = 32;
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_rxportconf.ring_size = 256;
+	dev_info->default_txportconf.ring_size = 256;
+
+	return 0;
+}
+
+/* Validate and record a new MTU; it takes effect after a port restart. */
+static s32 sxe_mtu_set(struct rte_eth_dev *dev, u16 mtu)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	u32 frame_size = mtu + SXE_ETH_OVERHEAD;
+	struct rte_eth_dev_info dev_info;
+	s32 ret;
+
+	ret = sxe_dev_infos_get(dev, &dev_info);
+	if (ret != 0) {
+		PMD_LOG_ERR(INIT, "get dev info fails with ret=%d", ret);
+		return ret;
+	}
+
+	/* MTU must stay within [standard MTU, max supported frame]. */
+	if (mtu < RTE_ETHER_MTU || frame_size > dev_info.max_rx_pktlen) {
+		PMD_LOG_ERR(INIT, "mtu=%u < %u or frame_size=%u > max_rx_pktlen=%u",
+				mtu, RTE_ETHER_MTU, frame_size, dev_info.max_rx_pktlen);
+		return -EINVAL;
+	}
+
+	/* A running port without scattered Rx cannot accept frames that
+	 * exceed a single mbuf data room — require a restart instead.
+	 */
+	if (dev_data->dev_started && !dev_data->scattered_rx &&
+	    (frame_size + 2 * SXE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+		PMD_LOG_ERR(INIT, "stop port first.");
+		return -EINVAL;
+	}
+
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+	/* Older DPDK still carries the jumbo-frame offload flag. */
+	if (frame_size > SXE_ETH_MAX_LEN)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	else
+		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+#endif
+	adapter->mtu = mtu;
+	PMD_LOG_NOTICE(DRV, "mtu set success, take effect after port-restart.");
+
+	return ret;
+}
+
+/* Dump the register snapshot. A NULL data pointer is a probe that only
+ * reports the snapshot geometry; a length of 0 means "everything".
+ */
+static int sxe_get_regs(struct rte_eth_dev *dev,
+			struct rte_dev_reg_info *regs)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 length = sxe_hw_all_regs_group_num_get();
+	u32 *data = regs->data;
+
+	if (data == NULL) {
+		regs->length = length;
+		regs->width = sizeof(uint32_t);
+		return 0;
+	}
+
+	if (regs->length == 0 || regs->length == length) {
+		sxe_hw_all_regs_group_read(hw, data);
+		return 0;
+	}
+
+	/* Partial dumps at other lengths are not supported. */
+	LOG_ERROR("get regs: inval param: regs_len=%u, regs->data=%p, "
+		"regs_offset=%u, regs_width=%u, regs_version=%u",
+		regs->length, regs->data,
+		regs->offset, regs->width,
+		regs->version);
+
+	return -ENOTSUP;
+}
+
+/* Return the identify LED to hardware control via a firmware command. */
+static s32 sxe_led_reset(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_led_ctrl ctrl;
+	s32 resp;
+	s32 ret;
+
+	ctrl.mode = SXE_IDENTIFY_LED_RESET;
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led reset", ret);
+		return -EIO;
+	}
+
+	LOG_DEBUG_BDF("led reset success");
+	return 0;
+}
+
+/* Switch the identify LED on or off via a firmware command. */
+static s32 sxe_led_ctrl(struct sxe_adapter *adapter, bool is_on)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_led_ctrl ctrl;
+	s32 resp;
+	s32 ret;
+
+	ctrl.mode = is_on ? SXE_IDENTIFY_LED_ON : SXE_IDENTIFY_LED_OFF;
+	ctrl.duration = 0;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_LED_CTRL,
+				(void *)&ctrl, sizeof(ctrl),
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led ctrl\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/* ethdev .dev_led_on callback: light the identify LED. */
+static int sxe_led_on(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	return sxe_led_ctrl(adapter, true);
+}
+
+/* ethdev .dev_led_off callback: extinguish the identify LED. */
+static int sxe_led_off(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	return sxe_led_ctrl(adapter, false);
+}
+
+/* Fetch the firmware version string. Per the ethdev contract, returns 0
+ * on success, a negative errno on failure, or — when the buffer is too
+ * small — the size (including the NUL) the caller must provide.
+ */
+static int sxe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+			size_t fw_size)
+{
+	struct sxe_adapter *adapter = (struct sxe_adapter *)(dev->data->dev_private);
+	struct sxe_hw *hw = &adapter->hw;
+	sxe_version_resp_s resp;
+	int needed;
+	int ret;
+
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_FW_VER_GET,
+				NULL, 0,
+				(void *)&resp, sizeof(resp));
+	if (ret) {
+		LOG_ERROR_BDF("get version failed, ret=%d\n", ret);
+		return -EIO;
+	}
+
+	needed = snprintf(fw_version, fw_size, "%s", resp.fw_version);
+	if (needed < 0)
+		return -EINVAL;
+
+	/* snprintf reports the length without the terminator; add it. */
+	needed += 1;
+
+	return (fw_size >= (size_t)needed) ? 0 : needed;
+}
+
+/* ethdev callback table for the sxe PF driver. The #ifdef'd tail keeps
+ * the table valid across the supported DPDK versions, where some ops
+ * moved between eth_dev_ops and struct rte_eth_dev.
+ */
+static const struct eth_dev_ops sxe_eth_dev_ops = {
+	.dev_configure = sxe_dev_configure,
+	.dev_start = sxe_dev_start,
+	.dev_stop = sxe_dev_stop,
+	.dev_close = sxe_dev_close,
+	.dev_reset = sxe_dev_reset,
+
+	.rx_queue_start = sxe_rx_queue_start,
+	.rx_queue_stop = sxe_rx_queue_stop,
+	.rx_queue_setup = sxe_rx_queue_setup,
+	.rx_queue_release = sxe_rx_queue_release,
+	.rxq_info_get = sxe_rx_queue_info_get,
+	.dev_infos_get = sxe_dev_infos_get,
+
+	.tx_queue_start = sxe_tx_queue_start,
+	.tx_queue_stop = sxe_tx_queue_stop,
+	.tx_queue_setup = sxe_tx_queue_setup,
+	.tx_queue_release = sxe_tx_queue_release,
+	.tx_done_cleanup = sxe_tx_done_cleanup,
+	.txq_info_get = sxe_tx_queue_info_get,
+
+	.promiscuous_enable = sxe_promiscuous_enable,
+	.promiscuous_disable = sxe_promiscuous_disable,
+	.allmulticast_enable = sxe_allmulticast_enable,
+	.allmulticast_disable = sxe_allmulticast_disable,
+
+	.rx_queue_intr_enable = sxe_rx_queue_intr_enable,
+	.rx_queue_intr_disable = sxe_rx_queue_intr_disable,
+
+	.mtu_set = sxe_mtu_set,
+	.reta_update = sxe_rss_reta_update,
+	.reta_query = sxe_rss_reta_query,
+	.rss_hash_update = sxe_rss_hash_update,
+	.rss_hash_conf_get = sxe_rss_hash_conf_get,
+
+	.mac_addr_add = sxe_mac_addr_add,
+	.mac_addr_remove = sxe_mac_addr_remove,
+	.mac_addr_set = sxe_mac_addr_set,
+
+	.uc_hash_table_set = sxe_uc_hash_table_set,
+	.uc_all_hash_table_set = sxe_uc_all_hash_table_set,
+
+	.set_mc_addr_list = sxe_set_mc_addr_list,
+
+	.stats_get = sxe_eth_stats_get,
+	.stats_reset = sxe_stats_reset,
+
+	.xstats_get = sxe_xstats_get,
+	.xstats_reset = sxe_xstats_reset,
+	.xstats_get_by_id = sxe_xstats_get_by_id,
+	.xstats_get_names = sxe_xstats_names_get,
+	.xstats_get_names_by_id = sxe_xstats_names_get_by_id,
+	.queue_stats_mapping_set = sxe_queue_stats_mapping_set,
+
+	.get_module_info = sxe_get_module_info,
+	.get_module_eeprom = sxe_get_module_eeprom,
+
+	.flow_ctrl_get = sxe_flow_ctrl_get,
+	.flow_ctrl_set = sxe_flow_ctrl_set,
+	.priority_flow_ctrl_set = sxe_priority_flow_ctrl_set,
+
+	.timesync_enable = sxe_timesync_enable,
+	.timesync_disable = sxe_timesync_disable,
+	.timesync_read_rx_timestamp = sxe_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = sxe_timesync_read_tx_timestamp,
+	.timesync_adjust_time = sxe_timesync_adjust_time,
+	.timesync_read_time = sxe_timesync_read_time,
+	.timesync_write_time = sxe_timesync_write_time,
+
+	.vlan_filter_set = sxe_vlan_filter_set,
+	.vlan_tpid_set = sxe_vlan_tpid_set,
+	.vlan_offload_set = sxe_vlan_offload_set,
+	.vlan_strip_queue_set = sxe_vlan_strip_queue_set,
+
+	.get_reg = sxe_get_regs,
+
+	.dev_set_link_up = sxe_dev_set_link_up,
+	.dev_set_link_down = sxe_dev_set_link_down,
+	.dev_led_on = sxe_led_on,
+	.dev_led_off = sxe_led_off,
+	.link_update = sxe_link_update,
+
+	.dev_supported_ptypes_get = sxe_dev_supported_ptypes_get,
+
+	.get_dcb_info = sxe_get_dcb_info,
+
+	.set_queue_rate_limit = sxe_queue_rate_limit_set,
+	.fw_version_get = sxe_fw_version_get,
+
+	/* Mirror rules exist only on DPDK versions that still define them. */
+#ifdef ETH_DEV_MIRROR_RULE
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	.mirror_rule_set = sxe_mirror_rule_set,
+	.mirror_rule_reset = sxe_mirror_rule_reset,
+#endif
+#endif
+
+	/* rte_flow entry point: legacy filter_ctrl vs. new flow_ops_get. */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#ifdef ETH_DEV_OPS_FILTER_CTRL
+	.filter_ctrl = sxe_filter_ctrl,
+#else
+	.flow_ops_get = sxe_flow_ops_get,
+#endif
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	.tm_ops_get = sxe_tm_ops_get,
+#endif
+
+#ifdef ETH_DEV_OPS_MONITOR
+	.get_monitor_addr = sxe_monitor_addr_get,
+#endif
+	/* On older DPDK the descriptor ops live in eth_dev_ops; newer DPDK
+	 * keeps them directly on struct rte_eth_dev (see sxe_ethdev_init).
+	 */
+#ifdef ETH_DEV_OPS_HAS_DESC_RELATE
+	.rx_queue_count = sxe_rx_queue_count,
+	.rx_descriptor_status = sxe_rx_descriptor_status,
+	.tx_descriptor_status = sxe_tx_descriptor_status,
+#ifdef ETH_DEV_RX_DESC_DONE
+	.rx_descriptor_done = sxe_rx_descriptor_done,
+#endif
+#endif
+};
+
+/* One-time hardware bring-up during probe: map the register BAR, sync
+ * with firmware, reset and start the MAC, and load the MAC address.
+ * On any failure the firmware driver-status flag is cleared again.
+ */
+static s32 sxe_hw_base_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+
+	hw->reg_base_addr = (void *)pci_dev->mem_resource[0].addr;
+	PMD_LOG_INFO(INIT, "eth_dev[%u] got reg_base_addr=%p",
+			eth_dev->data->port_id, hw->reg_base_addr);
+	hw->adapter = adapter;
+
+	/* strlcpy() takes the full destination size and always writes the
+	 * terminating NUL within it; the previous "sizeof() - 1" needlessly
+	 * dropped one usable byte of the name buffer.
+	 */
+	strlcpy(adapter->name, pci_dev->device.name, sizeof(adapter->name));
+
+	/* Announce the driver to firmware before any HDC commands. */
+	sxe_hw_hdc_drv_status_set(hw, (u32)true);
+
+	ret = sxe_phy_init(adapter);
+	if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) {
+		PMD_LOG_ERR(INIT, "sfp is not sfp+, not supported, ret=%d\n", ret);
+		ret = -EPERM;
+		goto l_out;
+	} else if (ret) {
+		/* Non-fatal phy errors are logged and init continues. */
+		PMD_LOG_ERR(INIT, "phy init failed, ret=%d\n", ret);
+	}
+
+	ret = sxe_hw_reset(hw);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw init failed, ret=%d", ret);
+		goto l_out;
+	} else {
+		sxe_hw_start(hw);
+	}
+
+	ret = sxe_mac_addr_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "mac addr init fail, ret=%d", ret);
+		goto l_out;
+	}
+
+	sxe_hw_fc_base_init(hw);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_pf_rst_done_set(hw);
+#endif
+
+l_out:
+	if (ret)
+		sxe_hw_hdc_drv_status_set(hw, (u32)false);
+
+	return ret;
+}
+
+/* Secondary-process attach: delegate to the shared base implementation. */
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed)
+{
+	__sxe_secondary_proc_init(eth_dev, rx_batch_alloc_allowed, rx_vec_allowed);
+}
+
+/* Release all MAC-address tables owned by the port. rte_free() is
+ * documented as a no-op on NULL, so the previous per-pointer guards
+ * were redundant; the pointers are cleared so a repeated call (or a
+ * later double-free attempt) stays harmless.
+ */
+static void sxe_ethdev_mac_mem_free(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+	rte_free(eth_dev->data->hash_mac_addrs);
+	eth_dev->data->hash_mac_addrs = NULL;
+
+	rte_free(adapter->mac_filter_ctxt.uc_addr_table);
+	adapter->mac_filter_ctxt.uc_addr_table = NULL;
+}
+
+#ifdef DPDK_19_11_6
+/* DPDK 19.11 does not zero dev_private on re-init, so clear the PF
+ * contexts explicitly before reuse.
+ */
+static void sxe_pf_init(struct sxe_adapter *adapter)
+{
+	memset(&adapter->vlan_ctxt, 0, sizeof(adapter->vlan_ctxt));
+	memset(&adapter->mac_filter_ctxt.uta_hash_table, 0,
+			sizeof(adapter->mac_filter_ctxt.uta_hash_table));
+	memset(&adapter->dcb_ctxt.config, 0, sizeof(adapter->dcb_ctxt.config));
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(&adapter->filter_ctxt, 0, sizeof(adapter->filter_ctxt));
+#endif
+
+}
+#endif
+
+/* Probe-time ethdev init: install callbacks and burst functions, then
+ * (primary process only) bring up the hardware and all PF sub-modules.
+ * Secondary processes only attach to the already-initialized port.
+ */
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused)
+{
+	s32 ret = 0;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	struct sxe_filter_context *filter_info = &adapter->filter_ctxt;
+#endif
+
+	eth_dev->dev_ops = &sxe_eth_dev_ops;
+
+	/* On newer DPDK the descriptor-status ops live directly on the
+	 * device structure instead of eth_dev_ops.
+	 */
+#ifndef ETH_DEV_OPS_HAS_DESC_RELATE
+	eth_dev->rx_queue_count = sxe_rx_queue_count;
+	eth_dev->rx_descriptor_status = sxe_rx_descriptor_status;
+	eth_dev->tx_descriptor_status = sxe_tx_descriptor_status;
+#ifdef ETH_DEV_RX_DESC_DONE
+	eth_dev->rx_descriptor_done = sxe_rx_descriptor_done;
+#endif
+#endif
+
+	eth_dev->rx_pkt_burst = &sxe_pkts_recv;
+	eth_dev->tx_pkt_burst = &sxe_pkts_xmit_with_offload;
+	eth_dev->tx_pkt_prepare = &sxe_prep_pkts;
+
+	/* Secondary processes only pick burst functions and return. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SIMD
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed,
+				&adapter->rx_vec_allowed);
+#else
+		bool rx_vec_allowed = 0;
+		sxe_secondary_proc_init(eth_dev, adapter->rx_batch_alloc_allowed,
+				&rx_vec_allowed);
+#endif
+		goto l_out;
+	}
+
+	rte_atomic32_clear(&adapter->link_thread_running);
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+#ifdef DPDK_19_11_6
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+	sxe_pf_init(adapter);
+#endif
+	ret = sxe_hw_base_init(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "hw base init fail.(err:%d)", ret);
+		goto l_out;
+	}
+
+	/* Return the identify LED to hardware control. */
+	sxe_led_reset(eth_dev);
+
+	sxe_dcb_init(eth_dev);
+
+	/* Reset stats info */
+	sxe_stats_reset(eth_dev);
+
+	sxe_queue_stats_map_reset(eth_dev);
+
+
+#ifdef SET_AUTOFILL_QUEUE_XSTATS
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+#endif
+
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_vt_init(eth_dev);
+#endif
+	adapter->mtu = RTE_ETHER_MTU;
+
+	sxe_irq_init(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+	memset(filter_info, 0, sizeof(struct sxe_filter_context));
+	TAILQ_INIT(&filter_info->fivetuple_list);
+	ret = sxe_fnav_filter_init(eth_dev);
+	if (ret) {
+		/* Roll back everything set up earlier in this call. */
+		sxe_ethdev_mac_mem_free(eth_dev);
+		sxe_irq_uninit(eth_dev);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+		sxe_vt_uninit(eth_dev);
+#endif
+		goto l_out;
+	}
+#endif
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_TM
+	sxe_tm_ctxt_init(eth_dev);
+#endif
+
+	PMD_LOG_INFO(INIT, "sxe eth dev init done.");
+
+l_out:
+	return ret;
+}
+
+/* Tear the port down and release MAC tables; primary process only. */
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_LOG_INFO(INIT, "not primary process ,do nothing");
+		return 0;
+	}
+
+	sxe_dev_close(eth_dev);
+	sxe_ethdev_mac_mem_free(eth_dev);
+
+	return 0;
+}
+
+/* ethdev .dev_reset callback: a full uninit followed by a re-init.
+ * Not allowed while SR-IOV VFs are active on the PF.
+ */
+static s32 sxe_dev_reset(struct rte_eth_dev *eth_dev)
+{
+	s32 ret;
+
+	if (eth_dev->data->sriov.active) {
+		PMD_LOG_ERR(INIT, "sriov activated, not support reset pf port[%u]",
+				eth_dev->data->port_id);
+		return -ENOTSUP;
+	}
+
+	ret = sxe_ethdev_uninit(eth_dev);
+	if (ret) {
+		PMD_LOG_ERR(INIT, "port[%u] dev uninit failed",
+				eth_dev->data->port_id);
+		return ret;
+	}
+
+	ret = sxe_ethdev_init(eth_dev, NULL);
+	if (ret)
+		PMD_LOG_ERR(INIT, "port[%u] dev init failed",
+				eth_dev->data->port_id);
+
+	return ret;
+}
+
+/* Public PMD-specific API: enable/disable Tx loopback between the
+ * virtualization pools of the given port.
+ *
+ * @param port  ethdev port id (must be an sxe port)
+ * @param on    1 to enable, 0 to disable
+ * @return 0 on success, -ENODEV/-ENOTSUP/-EINVAL on error
+ */
+s32 rte_pmd_sxe_tx_loopback_set(u16 port, u8 on)
+{
+	struct rte_eth_dev *dev;
+	struct sxe_adapter *adapter;
+	s32 ret = 0;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_sxe_supported(dev)) {
+		ret = -ENOTSUP;
+		PMD_LOG_ERR(DRV, "port:%u not support tx loopback set.", port);
+		goto l_out;
+	}
+
+	/* "on" is a boolean switch: only 0 and 1 are valid. */
+	if (on > 1) {
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "port:%u invalid user configure value:%u.",
+				port, on);
+		goto l_out;
+	}
+
+	adapter = dev->data->dev_private;
+
+	sxe_hw_vt_pool_loopback_switch(&adapter->hw, on);
+
+	/* Fix: success was previously logged at ERR level. */
+	PMD_LOG_INFO(DRV, "port:%u set tx loopback:%u success.", port, on);
+
+l_out:
+	return ret;
+}
+
new file mode 100644
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_ETHDEV_H__
+#define __SXE_ETHDEV_H__
+
+#include "sxe.h"
+
+/* MMW (max memory window) sizes for normal vs. jumbo frames, and the
+ * largest jumbo frame the hardware accepts.
+ */
+#define SXE_MMW_SIZE_DEFAULT 0x4
+#define SXE_MMW_SIZE_JUMBO_FRAME 0x14
+#define SXE_MAX_JUMBO_FRAME_SIZE 0x2600
+
+/* Largest standard (non-jumbo) frame length.
+ * NOTE(review): also defined locally in sxe_ethdev.c — identical
+ * definitions, but keeping a single copy would be cleaner.
+ */
+#define SXE_ETH_MAX_LEN (RTE_ETHER_MTU + SXE_ETH_OVERHEAD)
+
+/* RSS hash key length in 32-bit words. */
+#define SXE_HKEY_MAX_INDEX 10
+/* L2 framing overhead: header + CRC; DEAD_LOAD adds two VLAN tags. */
+#define SXE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+#define SXE_ETH_DEAD_LOAD (SXE_ETH_OVERHEAD + 2 * SXE_VLAN_TAG_SIZE)
+
+struct sxe_adapter;
+s32 sxe_ethdev_init(struct rte_eth_dev *eth_dev, void *param __rte_unused);
+
+s32 sxe_ethdev_uninit(struct rte_eth_dev *eth_dev);
+
+void sxe_secondary_proc_init(struct rte_eth_dev *eth_dev,
+		bool rx_batch_alloc_allowed, bool *rx_vec_allowed);
+
+#endif
new file mode 100644
@@ -0,0 +1,797 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#elif defined DPDK_21_11_5
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
+#else
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#endif
+
+#include <rte_malloc.h>
+#include <rte_ethdev.h>
+
+#include "sxe_filter.h"
+#include "sxe_logs.h"
+#include "sxe.h"
+#include "sxe_queue.h"
+#include "drv_msg.h"
+#include "sxe_pmd_hdc.h"
+#include "sxe_cli.h"
+#include "sxe_compat_version.h"
+
+#define PF_POOL_INDEX(p) (p)
+
+/* strip_bitmap keeps one vlan-strip bit per rx queue: each u32 word
+ * covers sizeof(u32) * BYTE_BIT_NUM = 32 queues; q/32 selects the
+ * word, q%32 the bit inside it.
+ */
+#define SXE_STRIP_BITMAP_SET(h, q) \
+	do { \
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] |= 1 << bit;\
+	} while (0)
+
+#define SXE_STRIP_BITMAP_CLEAR(h, q) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(h)->strip_bitmap[idx] &= ~(1 << bit);\
+	} while (0)
+
+/* Read queue q's strip bit into r (0 or 1). */
+#define SXE_STRIP_BITMAP_GET(h, q, r) \
+	do {\
+		u32 idx = (q) / (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		u32 bit = (q) % (sizeof((h)->strip_bitmap[0]) * BYTE_BIT_NUM); \
+		(r) = (h)->strip_bitmap[idx] >> bit & 1;\
+	} while (0)
+
+static s32 sxe_get_mac_addr_from_fw(struct sxe_adapter *adapter,
+				u8 *mac_addr)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_default_mac_addr_resp mac;
+	s32 ret;
+
+	/* Ask the firmware for the board's default mac address. */
+	ret = sxe_driver_cmd_trans(hw, SXE_CMD_R0_MAC_GET, NULL, 0,
+				(void *)&mac, sizeof(mac));
+	if (ret != 0) {
+		LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:mac addr get\n", ret);
+		return -EIO;
+	}
+
+	memcpy(mac_addr, mac.addr, SXE_MAC_ADDR_LEN);
+
+	return 0;
+}
+
+static void sxe_default_mac_addr_get(struct sxe_adapter *adapter)
+{
+	struct rte_ether_addr mac_addr = { {0} };
+	s32 ret;
+
+	/* Fetch the firmware-provided address and sanity-check it. */
+	ret = sxe_get_mac_addr_from_fw(adapter, mac_addr.addr_bytes);
+	if (ret != 0 || !rte_is_valid_assigned_ether_addr(&mac_addr)) {
+		LOG_DEBUG("invalid default mac addr:"MAC_FMT" result:%d\n",
+			MAC_ADDR(mac_addr.addr_bytes), ret);
+		return;
+	}
+
+	LOG_DEBUG("default mac addr = "MAC_FMT"\n", MAC_ADDR(mac_addr.addr_bytes));
+
+	/* Record it as both the default and the flow-control address. */
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.def_mac_addr);
+	rte_ether_addr_copy(&mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+}
+
+static u8 sxe_sw_uc_entry_add(struct sxe_adapter *adapter, u8 index,
+				u8 *mac_addr)
+{
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+	u8 i;
+
+	/* Claim the first free slot as a PF entry; when the table is
+	 * full, SXE_UC_ENTRY_NUM_MAX is returned.
+	 */
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (uc_table[i].used)
+			continue;
+
+		uc_table[i].used = true;
+		uc_table[i].rar_idx = i;
+		uc_table[i].original_index = index;
+		uc_table[i].type = SXE_PF;
+		rte_memcpy(uc_table[i].addr, mac_addr, SXE_MAC_ADDR_LEN);
+		break;
+	}
+
+	return i;
+}
+
+static u8 sxe_sw_uc_entry_del(struct sxe_adapter *adapter, u8 index)
+{
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+	u8 i;
+
+	/* Release the PF entry recorded with this original index; when no
+	 * match exists, SXE_UC_ENTRY_NUM_MAX is returned.
+	 */
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (uc_table[i].used && uc_table[i].type == SXE_PF &&
+		    uc_table[i].original_index == index) {
+			uc_table[i].used = false;
+			break;
+		}
+	}
+
+	return i;
+}
+
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan)
+{
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+	struct sxe_uc_addr_table *entry;
+	u8 i;
+
+	/* Claim the first free slot for this VF-owned address. */
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		entry = &uc_table[i];
+		if (entry->used)
+			continue;
+
+		entry->used = true;
+		entry->rar_idx = i;
+		entry->vf_idx = vf_idx;
+		entry->type = macvlan ? SXE_VF_MACVLAN : SXE_VF;
+		rte_memcpy(entry->addr, mac_addr, SXE_MAC_ADDR_LEN);
+		break;
+	}
+
+	return i;
+}
+
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+				bool macvlan)
+{
+	struct sxe_uc_addr_table *uc_table = adapter->mac_filter_ctxt.uc_addr_table;
+	u8 i;
+
+	/* Drop non-PF entries owned by this VF. Deleting the primary VF
+	 * address (macvlan == false) stops at the first match; a macvlan
+	 * delete sweeps all of the VF's entries.
+	 */
+	for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) {
+		if (!uc_table[i].used || uc_table[i].type == SXE_PF)
+			continue;
+
+		if (uc_table[i].vf_idx != vf_idx)
+			continue;
+
+		uc_table[i].used = false;
+		sxe_hw_uc_addr_del(&adapter->hw, i);
+		if (!macvlan)
+			break;
+	}
+
+	return 0;
+}
+
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u8 rar_idx;
+
+	/* Unicast address array exposed through the ethdev API. */
+	eth_dev->data->mac_addrs = rte_zmalloc("sxe",
+			RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		LOG_ERROR("mac addr allocate %u B fail.",
+				RTE_ETHER_ADDR_LEN * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	eth_dev->data->hash_mac_addrs = rte_zmalloc("sxe",
+			RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX, 0);
+	if (eth_dev->data->hash_mac_addrs == NULL) {
+		LOG_ERROR("uta table allocate %u B fail.",
+				RTE_ETHER_ADDR_LEN * SXE_UTA_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_mac_addr;
+	}
+
+	adapter->mac_filter_ctxt.uc_addr_table = rte_zmalloc("sxe",
+			sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX, 0);
+	if (adapter->mac_filter_ctxt.uc_addr_table == NULL) {
+		/* %zu: sizeof() yields size_t; %lu is wrong on ILP32 targets. */
+		LOG_ERROR("uc table allocate %zu B fail.",
+				sizeof(struct sxe_uc_addr_table) * SXE_UC_ENTRY_NUM_MAX);
+		ret = -ENOMEM;
+		goto l_free_hash_mac;
+	}
+
+	sxe_default_mac_addr_get(adapter);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+			eth_dev->data->mac_addrs);
+
+	rte_ether_addr_copy(&adapter->mac_filter_ctxt.def_mac_addr,
+			&adapter->mac_filter_ctxt.cur_mac_addr);
+
+	/* Reserve the first software-table slot for the default address. */
+	rar_idx = sxe_sw_uc_entry_add(adapter, 0, adapter->mac_filter_ctxt.def_mac_addr.addr_bytes);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	sxe_hw_uc_addr_add(hw, rar_idx,
+			adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+			sxe_vf_num_get(eth_dev));
+#else
+	sxe_hw_uc_addr_add(hw, rar_idx,
+			adapter->mac_filter_ctxt.def_mac_addr.addr_bytes,
+			0);
+#endif
+
+l_out:
+	return ret;
+
+l_free_hash_mac:
+	rte_free(eth_dev->data->hash_mac_addrs);
+	eth_dev->data->hash_mac_addrs = NULL;
+
+l_free_mac_addr:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	goto l_out;
+}
+
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl = sxe_hw_rx_mode_get(hw);
+
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	/* Turn on both unicast and multicast promiscuous bits. */
+	flt_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE);
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl = sxe_hw_rx_mode_get(hw);
+
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	/* Clear unicast promiscuous; keep multicast promiscuous only if
+	 * allmulticast mode is still active.
+	 */
+	flt_ctrl &= ~SXE_FCTRL_UPE;
+	if (dev->data->all_multicast == 1)
+		flt_ctrl |= SXE_FCTRL_MPE;
+	else
+		flt_ctrl &= ~SXE_FCTRL_MPE;
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl = sxe_hw_rx_mode_get(hw);
+
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	/* Multicast promiscuous bit on. */
+	flt_ctrl |= SXE_FCTRL_MPE;
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 flt_ctrl;
+
+	/* Promiscuous mode implies allmulticast; leave the bit alone. */
+	if (dev->data->promiscuous == 1) {
+		PMD_LOG_DEBUG(DRV, "promiscuous is enable, allmulticast must be enabled.\n");
+		return 0;
+	}
+
+	flt_ctrl = sxe_hw_rx_mode_get(hw);
+	PMD_LOG_DEBUG(DRV, "read flt_ctrl=0x%x\n", flt_ctrl);
+
+	flt_ctrl &= ~SXE_FCTRL_MPE;
+
+	PMD_LOG_DEBUG(DRV, "write flt_ctrl=0x%x\n", flt_ctrl);
+	sxe_hw_rx_mode_set(hw, flt_ctrl);
+
+	return 0;
+}
+
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr,
+			u32 index, u32 pool)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret;
+	u8 rar_idx = sxe_sw_uc_entry_add(adapter, index, mac_addr->addr_bytes);
+
+	/* Always honor the caller-supplied pool. The previous non-SRIOV
+	 * (#else) branch called sxe_vf_num_get(), which belongs to the
+	 * SR-IOV feature set and is not meaningful without it — the two
+	 * conditional branches appeared swapped.
+	 */
+	ret = sxe_hw_uc_addr_add(hw, rar_idx,
+				mac_addr->addr_bytes, pool);
+	if (ret) {
+		LOG_ERROR("rar_idx:%u pool:%u mac_addr:"MAC_FMT
+			"add fail.(err:%d)",
+			rar_idx, pool,
+			MAC_ADDR(mac_addr->addr_bytes), ret);
+		goto l_out;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u pool:%u mac_addr:"MAC_FMT" add done",
+			rar_idx, pool,
+			MAC_ADDR(mac_addr->addr_bytes));
+
+l_out:
+	return ret;
+}
+
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 index)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	u8 rar_idx = sxe_sw_uc_entry_del(adapter, index);
+	s32 ret;
+
+	/* Mirror the software-table removal into hardware. */
+	ret = sxe_hw_uc_addr_del(&adapter->hw, rar_idx);
+	if (ret != 0) {
+		PMD_LOG_ERR(DRV, "rar_idx:%u remove fail.(err:%d)",
+			rar_idx, ret);
+		return;
+	}
+
+	PMD_LOG_INFO(DRV, "rar_idx:%u mac_addr:"MAC_FMT" remove done",
+			rar_idx,
+			MAC_ADDR(&dev->data->mac_addrs[rar_idx]));
+}
+
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter)
+{
+	/* Program the stored flow-control mac address into hardware. */
+	sxe_hw_fc_mac_addr_set(&adapter->hw,
+			adapter->mac_filter_ctxt.fc_mac_addr.addr_bytes);
+}
+
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	u8 pool_idx;
+
+	/* Replace slot 0: drop the previous default address first. */
+	sxe_mac_addr_remove(dev, 0);
+
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	pool_idx = pci_dev->max_vfs;
+#else
+	pool_idx = 0;
+#endif
+
+	sxe_mac_addr_add(dev, mac_addr, 0, pool_idx);
+
+	/* Track the new address for flow control and reprogram it. */
+	rte_ether_addr_copy(mac_addr, &adapter->mac_filter_ctxt.fc_mac_addr);
+	sxe_fc_mac_addr_set(adapter);
+
+	PMD_LOG_INFO(DRV, "pool:%u mac_addr:"MAC_FMT" set to be cur mac addr done",
+			pool_idx,
+			MAC_ADDR(mac_addr));
+
+	return 0;
+}
+
+static void sxe_hash_mac_addr_parse(u8 *mac_addr, u16 *reg_idx,
+					u16 *bit_idx)
+{
+	/* Extract the hash bits from bytes 4 and 5 of the address. */
+	u16 extracted = (mac_addr[4] >> 4) | (((u16)mac_addr[5]) << 4);
+
+	extracted &= SXE_MAC_ADDR_EXTRACT_MASK;
+
+	/* Upper bits select the table register, lower bits the bit slot. */
+	*reg_idx = (extracted >> SXE_MAC_ADDR_SHIFT) & SXE_MAC_ADDR_REG_MASK;
+	*bit_idx = extracted & SXE_MAC_ADDR_BIT_MASK;
+
+	PMD_LOG_DEBUG(DRV, "mac_addr:"MAC_FMT" hash reg_idx:%u bit_idx:%u",
+			MAC_ADDR(mac_addr), *reg_idx, *bit_idx);
+}
+
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+				struct rte_ether_addr *mac_addr, u8 on)
+{
+	u16 bit_idx;
+	u16 reg_idx;
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 value;
+	s32 ret = 0;
+
+	/* Fixed: "&reg_idx" had been corrupted into a mis-encoded entity. */
+	sxe_hash_mac_addr_parse(mac_addr->addr_bytes, &reg_idx, &bit_idx);
+
+	/* Nothing to do when the bit already matches the requested state. */
+	value = (mac_filter->uta_hash_table[reg_idx] >> bit_idx) & 0x1;
+	if (value == on)
+		goto l_out;
+
+	value = sxe_hw_uta_hash_table_get(hw, reg_idx);
+	if (on) {
+		mac_filter->uta_used_count++;
+		value |= (0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	} else {
+		mac_filter->uta_used_count--;
+		value &= ~(0x1 << bit_idx);
+		mac_filter->uta_hash_table[reg_idx] &= ~(0x1 << bit_idx);
+	}
+
+	sxe_hw_uta_hash_table_set(hw, reg_idx, value);
+
+	PMD_LOG_INFO(DRV, "mac_addr:"MAC_FMT" uta reg_idx:%u bit_idx:%u"
+			" %s done, uta_used_count:%u",
+			MAC_ADDR(mac_addr->addr_bytes),
+			reg_idx, bit_idx,
+			on ? "set" : "clear",
+			mac_filter->uta_used_count);
+
+l_out:
+	return ret;
+}
+
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 pattern = on ? ~0u : 0u;
+	u8 i;
+
+	/* Fill or clear every UTA word, in hardware and in the shadow. */
+	for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) {
+		mac_filter->uta_hash_table[i] = pattern;
+		sxe_hw_uta_hash_table_set(hw, i, pattern);
+	}
+
+	PMD_LOG_INFO(DRV, "uta table all entry %s done.",
+			on ? "set" : "clear");
+
+	return 0;
+}
+
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mc_addr_list,
+			u32 nb_mc_addr)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt;
+	u32 i;
+	u16 bit_idx;
+	u16 reg_idx;
+
+	/* Rebuild the multicast (MTA) hash table from scratch. */
+	memset(&mac_filter->mta_hash_table, 0, sizeof(mac_filter->mta_hash_table));
+	for (i = 0; i < nb_mc_addr; i++) {
+		/* Fixed: "&reg_idx" was mis-encoded as an HTML entity. */
+		sxe_hash_mac_addr_parse(mc_addr_list->addr_bytes, &reg_idx, &bit_idx);
+		mc_addr_list++;
+		mac_filter->mta_hash_table[reg_idx] |= (0x1 << bit_idx);
+	}
+
+	for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++)
+		sxe_hw_mta_hash_table_set(hw, i, mac_filter->mta_hash_table[i]);
+
+	if (nb_mc_addr)
+		sxe_hw_mc_filter_enable(hw);
+
+	PMD_LOG_INFO(DRV, "mc addr list cnt:%u set to mta done.", nb_mc_addr);
+
+	return 0;
+}
+
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u8 reg_idx = (vlan_id >> SXE_VLAN_ID_SHIFT) & SXE_VLAN_ID_REG_MASK;
+	u8 bit_idx = vlan_id & SXE_VLAN_ID_BIT_MASK;
+	u32 value;
+
+	/* Read-modify-write the filter-table word holding this vlan id,
+	 * then mirror it into the software shadow.
+	 */
+	value = sxe_hw_vlan_filter_array_read(hw, reg_idx);
+	if (on)
+		value |= (1 << bit_idx);
+	else
+		value &= ~(1 << bit_idx);
+
+	sxe_hw_vlan_filter_array_write(hw, reg_idx, value);
+	vlan_ctxt->vlan_hash_table[reg_idx] = value;
+
+	PMD_LOG_INFO(DRV, "vlan_id:0x%x on:%d set done", vlan_id, on);
+
+	return 0;
+}
+
+static void sxe_vlan_tpid_write(struct sxe_hw *hw, u16 tpid)
+{
+	u32 reg;
+
+	/* Update the rx-side VLAN ethertype field. */
+	reg = sxe_hw_vlan_type_get(hw);
+	reg &= ~SXE_VLNCTRL_VET;
+	reg |= tpid;
+	sxe_hw_vlan_type_set(hw, reg);
+
+	/* Mirror the same ethertype into the tx control register. */
+	reg = sxe_hw_txctl_vlan_type_get(hw);
+	reg &= ~SXE_DMATXCTL_VT_MASK;
+	reg |= (tpid << SXE_DMATXCTL_VT_SHIFT);
+	sxe_hw_txctl_vlan_type_set(hw, reg);
+}
+
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+			enum rte_vlan_type vlan_type, u16 tpid)
+{
+	struct sxe_adapter *adapter = eth_dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	s32 ret = 0;
+	u32 txctl;
+	bool double_vlan;
+
+	/* The inner TPID is only programmable when global double vlan is
+	 * enabled; in that mode the outer TPID goes to the extended-vlan
+	 * register instead.
+	 */
+	txctl = sxe_hw_txctl_vlan_type_get(hw);
+	double_vlan = txctl & SXE_DMATXCTL_GDV;
+
+	switch (vlan_type) {
+	case RTE_ETH_VLAN_TYPE_INNER:
+		if (double_vlan) {
+			sxe_vlan_tpid_write(hw, tpid);
+		} else {
+			ret = -ENOTSUP;
+			/* Fixed garbled log text ("unsupport"). */
+			PMD_LOG_ERR(DRV, "unsupported inner vlan without "
+					"global double vlan.");
+		}
+		break;
+	case RTE_ETH_VLAN_TYPE_OUTER:
+		if (double_vlan) {
+			sxe_hw_vlan_ext_type_set(hw,
+				(tpid << SXE_EXVET_VET_EXT_SHIFT));
+		} else {
+			sxe_vlan_tpid_write(hw, tpid);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		PMD_LOG_ERR(DRV, "Unsupported VLAN type %d", vlan_type);
+		break;
+	}
+
+	PMD_LOG_INFO(DRV, "double_vlan:%d vlan_type:%d tpid:0x%x set done ret:%d",
+			double_vlan, vlan_type, tpid, ret);
+	return ret;
+}
+
+static void sxe_vlan_strip_bitmap_set(struct rte_eth_dev *dev, u16 queue_idx, bool on)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	sxe_rx_queue_s *rxq;
+
+	/* Reject indexes beyond the hardware ring count or the number of
+	 * configured rx queues.
+	 */
+	if (queue_idx >= SXE_HW_TXRX_RING_NUM_MAX ||
+	    queue_idx >= dev->data->nb_rx_queues) {
+		PMD_LOG_ERR(DRV, "invalid queue idx:%u exceed max"
+			" queue number:%u or nb_rx_queues:%u.",
+			queue_idx, SXE_HW_TXRX_RING_NUM_MAX,
+			dev->data->nb_rx_queues);
+		return;
+	}
+
+	if (on)
+		SXE_STRIP_BITMAP_SET(vlan_ctxt, queue_idx);
+	else
+		SXE_STRIP_BITMAP_CLEAR(vlan_ctxt, queue_idx);
+
+	/* Keep the queue's mbuf flags and offload bits consistent. */
+	rxq = dev->data->rx_queues[queue_idx];
+	if (on) {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	} else {
+		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	PMD_LOG_INFO(DRV, "queue idx:%u vlan strip on:%d set bitmap and offload done.",
+			queue_idx, on);
+}
+
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Apply each queue's configured strip offload to hardware and to
+	 * the driver's strip bitmap.
+	 */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		sxe_rx_queue_s *rxq = dev->data->rx_queues[i];
+		bool on = (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0;
+
+		sxe_hw_vlan_tag_strip_switch(hw, i, on);
+		sxe_vlan_strip_bitmap_set(dev, i, on);
+	}
+}
+
+static void sxe_vlan_filter_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* 0 == switch the vlan filter off. */
+	sxe_hw_vlan_filter_switch(&adapter->hw, 0);
+}
+
+static void sxe_vlan_filter_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	struct sxe_vlan_context *vlan_ctxt = &adapter->vlan_ctxt;
+	u32 vlan_ctl;
+	u16 i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Set VFE (filter enable) and clear the CFI bit. */
+	vlan_ctl = sxe_hw_vlan_type_get(hw);
+	vlan_ctl &= ~SXE_VLNCTRL_CFI;
+	vlan_ctl |= SXE_VLNCTRL_VFE;
+	sxe_hw_vlan_type_set(hw, vlan_ctl);
+
+	/* Restore the software shadow of the vlan filter table. */
+	for (i = 0; i < SXE_VFT_TBL_SIZE; i++)
+		sxe_hw_vlan_filter_array_write(hw, i, vlan_ctxt->vlan_hash_table[i]);
+}
+
+static void sxe_vlan_extend_disable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 reg;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Clear global double vlan in the tx control register. */
+	reg = sxe_hw_txctl_vlan_type_get(hw) & ~SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, reg);
+
+	/* And clear the extended-vlan bit. */
+	reg = sxe_hw_ext_vlan_get(hw) & ~SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, reg);
+}
+
+static void sxe_vlan_extend_enable(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 reg;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Set global double vlan in the tx control register. */
+	reg = sxe_hw_txctl_vlan_type_get(hw) | SXE_DMATXCTL_GDV;
+	sxe_hw_txctl_vlan_type_set(hw, reg);
+
+	/* And set the extended-vlan bit. */
+	reg = sxe_hw_ext_vlan_get(hw) | SXE_EXTENDED_VLAN;
+	sxe_hw_ext_vlan_set(hw, reg);
+}
+
+static s32 sxe_vlan_offload_configure(struct rte_eth_dev *dev, s32 mask)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+	/* Per-queue strip switches follow the queue offload flags. */
+	if (mask & RTE_ETH_VLAN_STRIP_MASK)
+		sxe_vlan_strip_switch_set(dev);
+
+	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+			sxe_vlan_filter_enable(dev);
+		else
+			sxe_vlan_filter_disable(dev);
+	}
+
+	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+			sxe_vlan_extend_enable(dev);
+		else
+			sxe_vlan_extend_disable(dev);
+	}
+
+	PMD_LOG_INFO(DRV, "mask:0x%x rx mode offload:0x%"SXE_PRIX64
+			" vlan offload set done", mask, rxmode->offloads);
+
+	return 0;
+}
+
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask)
+{
+	/* Strip reconfiguration is only accepted before device start. */
+	if (vlan_mask & RTE_ETH_VLAN_STRIP_MASK) {
+		PMD_LOG_WARN(DRV, "please set vlan strip before device start, not at this stage.");
+		return -1;
+	}
+
+	sxe_vlan_offload_configure(dev, vlan_mask & ~RTE_ETH_VLAN_STRIP_MASK);
+
+	PMD_LOG_INFO(DRV, "vlan offload mask:0x%x set done.", vlan_mask);
+
+	return 0;
+}
+
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on)
+{
+	/* Intentionally a no-op: per-queue strip must be configured
+	 * before device start, not through this callback.
+	 */
+	UNUSED(dev);
+	UNUSED(queue);
+	UNUSED(on);
+	PMD_LOG_WARN(DRV, "please set vlan strip before device start, not at this stage.");
+}
+
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev)
+{
+	struct sxe_adapter *adapter = dev->data->dev_private;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 vlan_ctl;
+
+	/* Re-apply every vlan offload category at configure time. */
+	sxe_vlan_offload_configure(dev, RTE_ETH_VLAN_STRIP_MASK |
+			RTE_ETH_VLAN_FILTER_MASK | RTE_ETH_VLAN_EXTEND_MASK);
+
+	/* Pure VMDq mode always needs the vlan filter enabled. */
+	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
+		vlan_ctl = sxe_hw_vlan_type_get(hw) | SXE_VLNCTRL_VFE;
+		sxe_hw_vlan_type_set(hw, vlan_ctl);
+		LOG_DEBUG_BDF("vmdq mode enable vlan filter done.");
+	}
+}
+
new file mode 100644
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+
+#ifndef __SXE_FILTER_H__
+#define __SXE_FILTER_H__
+
+#include <rte_ether.h>
+#include "sxe_dpdk_version.h"
+#if defined DPDK_20_11_5 || defined DPDK_19_11_6
+#include <rte_ethdev_driver.h>
+#else
+#include <ethdev_driver.h>
+#endif
+
+#include "sxe_types.h"
+#include "sxe_hw.h"
+
+struct sxe_adapter;
+
+/* printf helpers for mac addresses. */
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ADDR(x) ((u8 *)(x))[0], ((u8 *)(x))[1], \
+			((u8 *)(x))[2], ((u8 *)(x))[3], \
+			((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#define BYTE_BIT_NUM 8
+
+/* Number of u32 words needed to keep one strip bit per rx ring. */
+#define SXE_VLAN_STRIP_BITMAP_SIZE \
+	RTE_ALIGN((SXE_HW_TXRX_RING_NUM_MAX / (sizeof(u32) * BYTE_BIT_NUM)), \
+			sizeof(u32))
+
+struct sxe_vlan_context {
+	u32 vlan_hash_table[SXE_VFT_TBL_SIZE];	/* shadow of hw vlan filter table */
+	u32 strip_bitmap[SXE_VLAN_STRIP_BITMAP_SIZE];	/* per-queue strip state */
+	u32 vlan_table_size;
+};
+
+/* Owner of an entry in the software unicast address table. */
+enum sxe_uc_addr_src_type {
+	SXE_PF = 0,
+	SXE_VF,
+	SXE_VF_MACVLAN
+};
+
+struct sxe_uc_addr_table {
+	u8 rar_idx;		/* slot index programmed into hardware */
+	u8 vf_idx;		/* owning VF; valid for SXE_VF/SXE_VF_MACVLAN */
+	u8 type;		/* enum sxe_uc_addr_src_type */
+	u8 original_index;	/* caller-supplied index; valid for SXE_PF */
+	bool used;
+	u8 addr[SXE_MAC_ADDR_LEN];
+};
+
+struct sxe_mac_filter_context {
+	struct rte_ether_addr def_mac_addr;	/* firmware default address */
+	struct rte_ether_addr cur_mac_addr;
+
+	struct rte_ether_addr fc_mac_addr;	/* flow-control address */
+
+	u32 uta_used_count;
+	u32 uta_hash_table[SXE_UTA_ENTRY_NUM_MAX];
+
+	u32 mta_hash_table[SXE_MTA_ENTRY_NUM_MAX];
+	struct sxe_uc_addr_table *uc_addr_table;
+};
+
+s32 sxe_mac_addr_init(struct rte_eth_dev *eth_dev);
+
+s32 sxe_promiscuous_enable(struct rte_eth_dev *dev);
+
+s32 sxe_promiscuous_disable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_enable(struct rte_eth_dev *dev);
+
+s32 sxe_allmulticast_disable(struct rte_eth_dev *dev);
+
+s32 sxe_mac_addr_add(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr,
+			u32 rar_idx, u32 pool);
+
+void sxe_mac_addr_remove(struct rte_eth_dev *dev, u32 rar_idx);
+
+s32 sxe_mac_addr_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr);
+
+s32 sxe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, u8 on);
+
+s32 sxe_uc_all_hash_table_set(struct rte_eth_dev *dev, u8 on);
+
+/* Declared exactly once (a duplicate declaration was removed). */
+s32 sxe_set_mc_addr_list(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mc_addr_list,
+			u32 nb_mc_addr);
+
+s32 sxe_vlan_filter_set(struct rte_eth_dev *eth_dev, u16 vlan_id, s32 on);
+
+s32 sxe_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+			enum rte_vlan_type vlan_type, u16 tpid);
+
+s32 sxe_vlan_offload_set(struct rte_eth_dev *dev, s32 vlan_mask);
+
+void sxe_vlan_strip_queue_set(struct rte_eth_dev *dev, u16 queue, s32 on);
+
+void sxe_vlan_filter_configure(struct rte_eth_dev *dev);
+
+void sxe_vlan_strip_switch_set(struct rte_eth_dev *dev);
+
+void sxe_fc_mac_addr_set(struct sxe_adapter *adapter);
+
+u8 sxe_sw_uc_entry_vf_add(struct sxe_adapter *adapter,
+				u8 vf_idx, u8 *mac_addr, bool macvlan);
+
+s32 sxe_sw_uc_entry_vf_del(struct sxe_adapter *adapter, u8 vf_idx,
+				bool macvlan);
+
+#endif
new file mode 100644
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C), 2022, Linkdata Technology Co., Ltd.
+ */
+#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL
+#include <rte_malloc.h>
+#include <rte_flow_driver.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+
+#include "sxe.h"
+#include "sxe_logs.h"
+#include "sxe_hw.h"
+#include "sxe_fnav.h"
+#include "sxe_filter_ctrl.h"
+#include "sxe_offload.h"
+
+#define SXE_MIN_FIVETUPLE_PRIORITY 1
+#define SXE_MAX_FIVETUPLE_PRIORITY 7
+
+/* TAILQ list elements wrapping each filter kind tracked by the
+ * rte_flow layer of this driver.
+ */
+struct sxe_ntuple_filter_ele {
+	TAILQ_ENTRY(sxe_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+
+struct sxe_ethertype_filter_ele {
+	TAILQ_ENTRY(sxe_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+
+struct sxe_eth_syn_filter_ele {
+	TAILQ_ENTRY(sxe_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+
+struct sxe_fnav_rule_ele {
+	TAILQ_ENTRY(sxe_fnav_rule_ele) entries;
+	struct sxe_fnav_rule filter_info;
+};
+
+struct sxe_rss_filter_ele {
+	TAILQ_ENTRY(sxe_rss_filter_ele) entries;
+	struct sxe_rss_filter filter_info;
+};
+
+/* One installed five-tuple filter and its target rx queue. */
+struct sxe_fivetuple_filter {
+	TAILQ_ENTRY(sxe_fivetuple_filter) entries;
+	u16 index;
+	struct sxe_fivetuple_filter_info filter_info;
+	u16 queue;
+};
+
+static inline
+bool sxe_is_user_param_null(const struct rte_flow_item *pattern,
+				const struct rte_flow_action *actions,
+				const struct rte_flow_attr *attr,
+				struct rte_flow_error *error)
+{
+	/* Returns true (and fills *error) when any mandatory rte_flow
+	 * argument is missing.
+	 */
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				NULL, "NULL pattern.");
+		PMD_LOG_ERR(DRV, "pattern is null, validate failed.");
+		return true;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				NULL, "NULL action.");
+		PMD_LOG_ERR(DRV, "action is null, validate failed.");
+		return true;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR,
+				NULL, "NULL attribute.");
+		PMD_LOG_ERR(DRV, "attribute is null, validate failed.");
+		return true;
+	}
+
+	return false;
+}
+
+static inline
+bool sxe_is_attribute_wrong(const struct rte_flow_attr *attr,
+				struct rte_flow_error *error)
+{
+	bool ret = true;
+
+	/* Only plain ingress rules are supported: no egress, transfer,
+	 * or non-zero group attributes.
+	 */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				attr, "Only support ingress.");
+		/* Fixed garbled log text ("only sopport ingrass"). */
+		PMD_LOG_ERR(DRV, "only support ingress, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				attr, "Not support egress.");
+		PMD_LOG_ERR(DRV, "not support egress, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "Not support transfer.");
+		PMD_LOG_ERR(DRV, "not support transfer, validate failed.");
+		goto l_out;
+	}
+
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				attr, "Not support group.");
+		PMD_LOG_ERR(DRV, "not support group, validate failed.");
+		goto l_out;
+	}
+
+	ret = false;
+
+l_out:
+	return ret;
+}
+
+static inline
+bool sxe_is_port_mask_wrong(u16 src_port_mask, u16 dst_port_mask,
+				const struct rte_flow_item *item,
+				struct rte_flow_error *error)
+{
+	bool src_bad = (src_port_mask != 0 && src_port_mask != UINT16_MAX);
+	bool dst_bad = (dst_port_mask != 0 && dst_port_mask != UINT16_MAX);
+
+	/* Only all-zero or all-one port masks are supported. */
+	if (src_bad || dst_bad) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+		PMD_LOG_WARN(DRV, "mask--src_port[0x%x], dst_port[0x%x], validate failed.",
+				src_port_mask, dst_port_mask);
+		return true;
+	}
+
+	return false;
+}
+
+static inline
+const struct rte_flow_item *sxe_next_no_void_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *current)
+{
+	const struct rte_flow_item *item;
+
+	/* Start at the first item, or just past "current"; the END item
+	 * always terminates the scan since it is not VOID.
+	 */
+	for (item = current ? current + 1 : &pattern[0];
+	     item->type == RTE_FLOW_ITEM_TYPE_VOID;
+	     item++)
+		;
+
+	return item;
+}
+
+static inline
+const struct rte_flow_action *sxe_next_no_void_action(
+		const struct rte_flow_action actions[],
+		const struct rte_flow_action *current)
+{
+	const struct rte_flow_action *act;
+
+	/* Start at the first action, or just past "current"; the END
+	 * action always terminates the scan since it is not VOID.
+	 */
+	for (act = current ? current + 1 : &actions[0];
+	     act->type == RTE_FLOW_ACTION_TYPE_VOID;
+	     act++)
+		;
+
+	return act;
+}
+
+static inline
+const struct rte_flow_item *sxe_next_no_fuzzy_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *current)
+{
+	const struct rte_flow_item *item =
+		sxe_next_no_void_pattern(pattern, current);
+
+	/* Skip FUZZY items in addition to VOID ones. */
+	while (item->type == RTE_FLOW_ITEM_TYPE_FUZZY)
+		item = sxe_next_no_void_pattern(pattern, item);
+
+	return item;
+}
+
+static u8
+sxe_flow_l4type_convert(enum rte_flow_item_type protocol)
+{
+	u8 proto = 0;	/* fixed: "U8" was a typo that does not compile */
+
+	/* Map an rte_flow L4 item type to its IP protocol number;
+	 * anything else falls through to 0.
+	 */
+	switch (protocol) {
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		proto = IPPROTO_TCP;
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		proto = IPPROTO_UDP;
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		proto = IPPROTO_SCTP;
+		break;
+	default:
+		PMD_LOG_WARN(DRV, "flow l4type convert failed.");
+	}
+
+	return proto;
+}
+
+/* Accept exactly one QUEUE action followed by END; the target rx queue
+ * is returned through *queue_index.
+ *
+ * Error reporting relies on rte_flow_error_set() also setting
+ * rte_errno, so every failure path just falls through to
+ * "return -rte_errno"; the success path clears rte_errno first.
+ */
+static s32 sxe_filter_action_parse(struct rte_eth_dev *dev,
+					const struct rte_flow_action actions[],
+					struct rte_flow_error *error,
+					u16 *queue_index)
+{
+	const struct rte_flow_action *act;
+
+	act = sxe_next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	*queue_index =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* Queue must exist in the current rx configuration. */
+	if (*queue_index >= dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				act, "queue index much too big");
+		PMD_LOG_ERR(DRV, "queue index check wrong, validate failed.");
+		goto l_out;
+	}
+
+	act = sxe_next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto l_out;
+	}
+
+	PMD_LOG_DEBUG(DRV, "filter action parse success.");
+	/* Success: make -rte_errno evaluate to 0. */
+	rte_errno = 0;
+
+l_out:
+	return -rte_errno;
+}
+
+static s32 sxe_fivetuple_filter_pattern_parse(const struct rte_flow_item pattern[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ struct rte_flow_item_eth eth_null;
+ struct rte_flow_item_vlan vlan_null;
+
+ memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
+ memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+ item = sxe_next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+ goto l_out;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+ goto l_out;
+ }
+
+ if ((item->spec || item->mask) &&
+ ((item->spec && memcmp(eth_spec, ð_null,
+ sizeof(struct rte_flow_item_eth))) ||
+ (item->mask && memcmp(eth_mask, ð_null,
+ sizeof(struct rte_flow_item_eth))))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item spec[%p], item mask[%p], validate failed.",
+ item->spec, item->mask);
+ goto l_out;
+ }
+
+ item = sxe_next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+ goto l_out;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+ goto l_out;
+ }
+
+ if ((item->spec || item->mask) &&
+ ((item->spec && memcmp(vlan_spec, &vlan_null,
+ sizeof(struct rte_flow_item_vlan))) ||
+ (item->mask && memcmp(vlan_mask, &vlan_null,
+ sizeof(struct rte_flow_item_vlan))))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item spec[%p], item mask[%p], validate failed.",
+ item->spec, item->mask);
+ goto l_out;
+ }
+
+ item = sxe_next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+ goto l_out;
+ }
+ }
+
+ if (item->mask) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+ goto l_out;
+ }
+
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple spec");
+ PMD_LOG_WARN(DRV, "item spec is null, validate failed.");
+ goto l_out;
+ }
+
+ ipv4_mask = item->mask;
+ if (SXE_5TUPLE_IPV4_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+ goto l_out;
+ }
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+ ipv4_mask->hdr.next_proto_id != 0)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "mask--src_addr[0x%x], dst_addr[0x%x], next_proto_id[0x%x], validate failed.",
+ ipv4_mask->hdr.src_addr, ipv4_mask->hdr.dst_addr,
+ ipv4_mask->hdr.next_proto_id);
+ goto l_out;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+ }
+
+ item = sxe_next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+ goto l_out;
+ }
+
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec && !item->mask)) {
+ if (!filter->proto_mask) {
+ filter->proto_mask = UINT8_MAX;
+ filter->proto = sxe_flow_l4type_convert(item->type);
+ }
+ PMD_LOG_DEBUG(DRV, "TCP/UDP/SCTP item spec and mask is null, to check action.");
+ rte_errno = 0;
+ goto l_out;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec || !item->mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ PMD_LOG_WARN(DRV, "spec or mask is null, validate failed.");
+ goto l_out;
+ }
+
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ PMD_LOG_WARN(DRV, "not support last set, validate failed.");
+ goto l_out;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ if (filter->proto != IPPROTO_TCP &&
+ filter->proto_mask != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "protocal id is not TCP, please check.");
+ goto l_out;
+ }
+
+ tcp_mask = item->mask;
+ if (SXE_5TUPLE_TCP_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+ goto l_out;
+ }
+
+ if (sxe_is_port_mask_wrong(tcp_mask->hdr.src_port,
+ tcp_mask->hdr.dst_port, item, error)) {
+ PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+ goto l_out;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ if (filter->proto != IPPROTO_UDP &&
+ filter->proto_mask != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "protocal id is not UDP, please check.");
+ goto l_out;
+ }
+
+ udp_mask = item->mask;
+ if (SXE_5TUPLE_UDP_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+ goto l_out;
+ }
+
+ if (sxe_is_port_mask_wrong(udp_mask->hdr.src_port,
+ udp_mask->hdr.dst_port, item, error)) {
+ PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+ goto l_out;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ if (filter->proto != IPPROTO_SCTP &&
+ filter->proto_mask != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "protocal id is not SCTP, please check.");
+ goto l_out;
+ }
+
+ sctp_mask = item->mask;
+ if (SXE_5TUPLE_SCTP_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "not support other mask set, validate failed.");
+ goto l_out;
+ }
+
+ if (sxe_is_port_mask_wrong(sctp_mask->hdr.src_port,
+ sctp_mask->hdr.dst_port, item, error)) {
+ PMD_LOG_WARN(DRV, "port mask set wrong, validate failed.");
+ goto l_out;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ rte_errno = 0;
+ goto l_out;
+ }
+
+ item = sxe_next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ PMD_LOG_WARN(DRV, "item type[%d] is wrong, validate failed.", item->type);
+ goto l_out;
+ }
+
+ PMD_LOG_DEBUG(DRV, "fivetuple filter pattern parse success.");
+ rte_errno = 0;
+
+l_out:
+ return -rte_errno;
+
+}
+
+/**
+ * sxe_fivetuple_filter_parse - validate a flow rule and fill an ntuple filter.
+ *
+ * Checks the user-supplied pattern, actions and attributes in turn; when
+ * every check passes, the parsed five-tuple is recorded in @filter.
+ *
+ * @dev:     ethernet device the rule is being created on
+ * @attr:    rule attributes (direction, group, priority)
+ * @pattern: flow item array terminated by RTE_FLOW_ITEM_TYPE_END
+ * @actions: flow action array terminated by RTE_FLOW_ACTION_TYPE_END
+ * @filter:  parsed output; zeroed again whenever validation fails
+ * @error:   rte_flow error details reported back to the caller
+ *
+ * Return: 0 on success, -rte_errno (negative) on validation failure.
+ */
+static s32 sxe_fivetuple_filter_parse(struct rte_eth_dev *dev,
+					const struct rte_flow_attr *attr,
+					const struct rte_flow_item pattern[],
+					const struct rte_flow_action actions[],
+					struct rte_eth_ntuple_filter *filter,
+					struct rte_flow_error *error)
+{
+	s32 ret = 0;
+	u16 queue_index = 0;
+
+	/* All three user inputs must be non-NULL before any parsing starts. */
+	if (sxe_is_user_param_null(pattern, actions, attr, error)) {
+		PMD_LOG_ERR(DRV, "user param is null, validate failed.");
+		goto parse_failed;
+	}
+
+	/* Parse the ETH/VLAN/IPv4/L4 items into @filter. */
+	ret = sxe_fivetuple_filter_pattern_parse(pattern, filter, error);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "pattern check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* Actions resolve to a single destination queue index. */
+	ret = sxe_filter_action_parse(dev, actions, error, &queue_index);
+	if (ret != 0) {
+		PMD_LOG_WARN(DRV, "action check wrong, validate failed.");
+		goto parse_failed;
+	} else {
+		filter->queue = queue_index;
+	}
+
+	if (sxe_is_attribute_wrong(attr, error)) {
+		PMD_LOG_ERR(DRV, "attribute check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/* filter->priority is u16; reject priorities that cannot fit. */
+	if (attr->priority > 0xFFFF) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+			attr, "Error priority.");
+		PMD_LOG_ERR(DRV, "priority check wrong, validate failed.");
+		goto parse_failed;
+	}
+
+	/*
+	 * Out-of-range priorities (outside 1..7 per the log below) are not
+	 * rejected, only clamped to the minimum supported value.
+	 */
+	filter->priority = (u16)attr->priority;
+	if (attr->priority < SXE_MIN_FIVETUPLE_PRIORITY ||
+		attr->priority > SXE_MAX_FIVETUPLE_PRIORITY) {
+		PMD_LOG_WARN(DRV, "priority[%d] is out of 1~7, set to 1.", attr->priority);
+		filter->priority = SXE_MIN_FIVETUPLE_PRIORITY;
+	}
+
+	PMD_LOG_DEBUG(DRV, "five tuple filter fit, validate success!!");
+	/* Clear rte_errno so the -rte_errno return below yields 0. */
+	rte_errno = 0;
+	goto l_out;
+
+parse_failed:
+	/* Never hand back a partially filled filter on failure. */
+	memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	PMD_LOG_WARN(DRV, "five tuple filter, validate failed.");
+l_out:
+	return -rte_errno;
+}
+
+/* Map an IP protocol number onto the driver's five-tuple filter type;
+ * anything other than TCP/UDP/SCTP falls back to PROTOCOL_NONE.
+ */
+static enum sxe_fivetuple_protocol
+sxe_protocol_type_convert(u8 protocol_value)
+{
+	enum sxe_fivetuple_protocol filter_proto = SXE_FILTER_PROTOCOL_NONE;
+
+	if (protocol_value == IPPROTO_TCP)
+		filter_proto = SXE_FILTER_PROTOCOL_TCP;
+	else if (protocol_value == IPPROTO_UDP)
+		filter_proto = SXE_FILTER_PROTOCOL_UDP;
+	else if (protocol_value == IPPROTO_SCTP)
+		filter_proto = SXE_FILTER_PROTOCOL_SCTP;
+
+	return filter_proto;
+}
+
+static s32
+sxe_ntuple_filter_to_fivetuple(struct rte_eth_ntuple_filter *ntuple_filter,
+ struct sxe_fivetuple_filter_info *filter_info)
+{
+ s32 ret = -EINVAL;
+
+ switch (ntuple_filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = ntuple_filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_LOG_ERR(DRV, "invalid dst_ip mask.");
+ goto l_out;
+ }
+
+ switch (ntuple_filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = ntuple_filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_LOG_ERR(DRV, "invalid src_ip mask.");
+ goto l_out;
+ }
+
+ switch (ntuple_filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = ntuple_filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_LOG_ERR(DRV, "invalid dst_port mask.");
+ goto l_out;
+ }
+
+ switch (ntuple_filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = ntuple_filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_LOG_ERR(DRV, "invalid src_port mask.");
+ goto l_out;
+ }
+
+ switch (ntuple_filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->protocol =
+ sxe_protocol_type_convert(ntuple_filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_LOG_ERR(DRV, "invalid protocol mask.");
+ goto l_out;
+ }
+
+ filter_info->priority = (u8)