[dpdk-dev,v2,09/19] ixgbe: move ixgbe PMD to drivers/net directory

Message ID 1431705423-16134-10-git-send-email-bruce.richardson@intel.com (mailing list archive)
State Accepted, archived

Commit Message

Bruce Richardson May 15, 2015, 3:56 p.m. UTC
Move the ixgbe PMD to the drivers/net directory.
As part of the move, we rename the sub-directory containing the ixgbe
"base driver" code from "ixgbe" to "base".

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/Makefile                           |    2 +-
 drivers/net/ixgbe/Makefile                     |  126 +
 drivers/net/ixgbe/base/README                  |   67 +
 drivers/net/ixgbe/base/ixgbe_82598.c           | 1435 +++++++
 drivers/net/ixgbe/base/ixgbe_82598.h           |   52 +
 drivers/net/ixgbe/base/ixgbe_82599.c           | 2713 +++++++++++++
 drivers/net/ixgbe/base/ixgbe_82599.h           |   65 +
 drivers/net/ixgbe/base/ixgbe_api.c             | 1477 +++++++
 drivers/net/ixgbe/base/ixgbe_api.h             |  206 +
 drivers/net/ixgbe/base/ixgbe_common.c          | 4940 ++++++++++++++++++++++++
 drivers/net/ixgbe/base/ixgbe_common.h          |  183 +
 drivers/net/ixgbe/base/ixgbe_dcb.c             |  714 ++++
 drivers/net/ixgbe/base/ixgbe_dcb.h             |  174 +
 drivers/net/ixgbe/base/ixgbe_dcb_82598.c       |  360 ++
 drivers/net/ixgbe/base/ixgbe_dcb_82598.h       |   99 +
 drivers/net/ixgbe/base/ixgbe_dcb_82599.c       |  593 +++
 drivers/net/ixgbe/base/ixgbe_dcb_82599.h       |  153 +
 drivers/net/ixgbe/base/ixgbe_mbx.c             |  789 ++++
 drivers/net/ixgbe/base/ixgbe_mbx.h             |  150 +
 drivers/net/ixgbe/base/ixgbe_osdep.h           |  155 +
 drivers/net/ixgbe/base/ixgbe_phy.c             | 2583 +++++++++++++
 drivers/net/ixgbe/base/ixgbe_phy.h             |  181 +
 drivers/net/ixgbe/base/ixgbe_type.h            | 3858 ++++++++++++++++++
 drivers/net/ixgbe/base/ixgbe_vf.c              |  724 ++++
 drivers/net/ixgbe/base/ixgbe_vf.h              |  140 +
 drivers/net/ixgbe/base/ixgbe_x540.c            | 1040 +++++
 drivers/net/ixgbe/base/ixgbe_x540.h            |   66 +
 drivers/net/ixgbe/base/ixgbe_x550.c            | 2113 ++++++++++
 drivers/net/ixgbe/base/ixgbe_x550.h            |   91 +
 drivers/net/ixgbe/ixgbe_82599_bypass.c         |  314 ++
 drivers/net/ixgbe/ixgbe_bypass.c               |  414 ++
 drivers/net/ixgbe/ixgbe_bypass.h               |   68 +
 drivers/net/ixgbe/ixgbe_bypass_api.h           |  299 ++
 drivers/net/ixgbe/ixgbe_bypass_defines.h       |  160 +
 drivers/net/ixgbe/ixgbe_ethdev.c               | 4453 +++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h               |  400 ++
 drivers/net/ixgbe/ixgbe_fdir.c                 | 1144 ++++++
 drivers/net/ixgbe/ixgbe_logs.h                 |   78 +
 drivers/net/ixgbe/ixgbe_pf.c                   |  629 +++
 drivers/net/ixgbe/ixgbe_rxtx.c                 | 4780 +++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.h                 |  293 ++
 drivers/net/ixgbe/ixgbe_rxtx_vec.c             |  792 ++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map    |    4 +
 lib/Makefile                                   |    1 -
 lib/librte_pmd_ixgbe/Makefile                  |  126 -
 lib/librte_pmd_ixgbe/ixgbe/README              |   67 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c       | 1435 -------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h       |   52 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c       | 2713 -------------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h       |   65 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c         | 1477 -------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h         |  206 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c      | 4940 ------------------------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h      |  183 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c         |  714 ----
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h         |  174 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c   |  360 --
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h   |   99 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c   |  593 ---
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h   |  153 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c         |  789 ----
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h         |  150 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h       |  155 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c         | 2583 -------------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h         |  181 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h        | 3858 ------------------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c          |  724 ----
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h          |  140 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c        | 1040 -----
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h        |   66 -
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c        | 2113 ----------
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h        |   91 -
 lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c      |  314 --
 lib/librte_pmd_ixgbe/ixgbe_bypass.c            |  414 --
 lib/librte_pmd_ixgbe/ixgbe_bypass.h            |   68 -
 lib/librte_pmd_ixgbe/ixgbe_bypass_api.h        |  299 --
 lib/librte_pmd_ixgbe/ixgbe_bypass_defines.h    |  160 -
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c            | 4453 ---------------------
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h            |  400 --
 lib/librte_pmd_ixgbe/ixgbe_fdir.c              | 1144 ------
 lib/librte_pmd_ixgbe/ixgbe_logs.h              |   78 -
 lib/librte_pmd_ixgbe/ixgbe_pf.c                |  629 ---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c              | 4780 -----------------------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h              |  293 --
 lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c          |  792 ----
 lib/librte_pmd_ixgbe/rte_pmd_ixgbe_version.map |    4 -
 86 files changed, 39076 insertions(+), 39077 deletions(-)
 create mode 100644 drivers/net/ixgbe/Makefile
 create mode 100644 drivers/net/ixgbe/base/README
 create mode 100644 drivers/net/ixgbe/base/ixgbe_82598.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_82598.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_82599.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_82599.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_api.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_api.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_common.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_common.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb_82598.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb_82598.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb_82599.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_dcb_82599.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_mbx.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_mbx.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_osdep.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_phy.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_phy.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_type.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_vf.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_vf.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_x540.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_x540.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_x550.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_x550.h
 create mode 100644 drivers/net/ixgbe/ixgbe_82599_bypass.c
 create mode 100644 drivers/net/ixgbe/ixgbe_bypass.c
 create mode 100644 drivers/net/ixgbe/ixgbe_bypass.h
 create mode 100644 drivers/net/ixgbe/ixgbe_bypass_api.h
 create mode 100644 drivers/net/ixgbe/ixgbe_bypass_defines.h
 create mode 100644 drivers/net/ixgbe/ixgbe_ethdev.c
 create mode 100644 drivers/net/ixgbe/ixgbe_ethdev.h
 create mode 100644 drivers/net/ixgbe/ixgbe_fdir.c
 create mode 100644 drivers/net/ixgbe/ixgbe_logs.h
 create mode 100644 drivers/net/ixgbe/ixgbe_pf.c
 create mode 100644 drivers/net/ixgbe/ixgbe_rxtx.c
 create mode 100644 drivers/net/ixgbe/ixgbe_rxtx.h
 create mode 100644 drivers/net/ixgbe/ixgbe_rxtx_vec.c
 create mode 100644 drivers/net/ixgbe/rte_pmd_ixgbe_version.map
 delete mode 100644 lib/librte_pmd_ixgbe/Makefile
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/README
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_bypass.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_bypass.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_bypass_api.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_bypass_defines.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_ethdev.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_ethdev.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_fdir.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_logs.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_pf.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_rxtx.c
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_rxtx.h
 delete mode 100644 lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
 delete mode 100644 lib/librte_pmd_ixgbe/rte_pmd_ixgbe_version.map
  

Patch

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7ce6685..001270d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -37,7 +37,7 @@  DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
 DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
 DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
 DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
-#DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += librte_pmd_ixgbe
+DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
 #DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += librte_pmd_mlx4
 #DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += librte_pmd_ring
 #DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += librte_pmd_pcap
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
new file mode 100644
index 0000000..f92a565
--- /dev/null
+++ b/drivers/net/ixgbe/Makefile
@@ -0,0 +1,126 @@ 
+#   BSD LICENSE
+#
+#   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ixgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_ixgbe_version.map
+
+LIBABIVER := 1
+
+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+
+else ifeq ($(CC), clang)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS     += -Wno-deprecated
+CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
+CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ixgbe_x550.o += -Wno-maybe-uninitialized
+endif
+
+ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
+CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
+endif
+
+endif
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
+
+ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
+endif
+
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc
+
+ifeq ($(CONFIG_RTE_IXGBE_INC_VECTOR)$(CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC),yn)
+$(error The ixgbe vpmd depends on Rx bulk alloc)
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
new file mode 100644
index 0000000..ba1249b
--- /dev/null
+++ b/drivers/net/ixgbe/base/README
@@ -0,0 +1,67 @@ 
+..
+     BSD LICENSE
+   
+     Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+     All rights reserved.
+   
+     Redistribution and use in source and binary forms, with or without
+     modification, are permitted provided that the following conditions
+     are met:
+   
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in
+         the documentation and/or other materials provided with the
+         distribution.
+       * Neither the name of Intel Corporation nor the names of its
+         contributors may be used to endorse or promote products derived
+         from this software without specific prior written permission.
+   
+     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® IXGBE driver
+===================
+
+This directory contains the source code of the FreeBSD ixgbe driver,
+version cid-10g-shared-code.2015.02.03, released by LAD. The lad/
+sub-directory contains the original source package.
+This driver is valid for the product(s) listed below:
+
+* Intel® 10 Gigabit AF DA Dual Port Server Adapter
+* Intel® 10 Gigabit AT Server Adapter
+* Intel® 10 Gigabit AT2 Server Adapter
+* Intel® 10 Gigabit CX4 Dual Port Server Adapter
+* Intel® 10 Gigabit XF LR Server Adapter
+* Intel® 10 Gigabit XF SR Dual Port Server Adapter
+* Intel® 10 Gigabit XF SR Server Adapter
+* Intel® 82598 10 Gigabit Ethernet Controller
+* Intel® 82599 10 Gigabit Ethernet Controller
+* Intel® Ethernet Controller X540-AT2
+* Intel® Ethernet Server Adapter X520 Series
+* Intel® Ethernet Server Adapter X520-T2
+* Intel® Ethernet Controller X550-BT2
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel® DPDK:
+
+
+ixgbe_osdep.h
+-------------
+
+The OS dependency layer has been extensively modified to support the drivers in
+the Intel® DPDK environment. It is expected that these files will not need to be
+changed on updating the driver.
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
new file mode 100644
index 0000000..4e06550
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -0,0 +1,1435 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES   16
+#define IXGBE_82598_MC_TBL_SIZE  128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE   512
+
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+					     ixgbe_link_speed *speed,
+					     bool *autoneg);
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+				      bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+				      ixgbe_link_speed *speed, bool *link_up,
+				      bool link_up_wait_to_complete);
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed,
+				      bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+				  u32 headroom, int strategy);
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 *sff8472_data);
+/**
+ *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82598 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 250ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/*
+	 * if capabilities version is type 1 we can write the
+	 * timeout of 10ms to 250ms through the GCR register
+	 */
+	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/*
+	 * for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for 82598.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_init_ops_82598");
+
+	ret_val = ixgbe_init_phy_ops_generic(hw);
+	ret_val = ixgbe_init_ops_generic(hw);
+
+	/* PHY */
+	phy->ops.init = ixgbe_init_phy_ops_82598;
+
+	/* MAC */
+	mac->ops.start_hw = ixgbe_start_hw_82598;
+	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
+	mac->ops.reset_hw = ixgbe_reset_hw_82598;
+	mac->ops.get_media_type = ixgbe_get_media_type_82598;
+	mac->ops.get_supported_physical_layer =
+				ixgbe_get_supported_physical_layer_82598;
+	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
+	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
+	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
+	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
+
+	/* RAR, Multicast, VLAN */
+	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
+	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
+	mac->ops.set_vfta = ixgbe_set_vfta_82598;
+	mac->ops.set_vlvf = NULL;
+	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
+
+	/* Flow Control */
+	mac->ops.fc_enable = ixgbe_fc_enable_82598;
+
+	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
+	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
+	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
+	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
+	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
+	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
+	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
+
+	/* SFP+ Module */
+	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
+	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
+
+	/* Link */
+	mac->ops.check_link = ixgbe_check_mac_link_82598;
+	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
+	mac->ops.flap_tx_laser = NULL;
+	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
+	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
+
+	/* Manageability interface */
+	mac->ops.set_fw_drv_ver = NULL;
+
+	mac->ops.get_rtrup2tc = NULL;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val = IXGBE_SUCCESS;
+	u16 list_offset, data_offset;
+
+	DEBUGFUNC("ixgbe_init_phy_ops_82598");
+
+	/* Identify the PHY */
+	phy->ops.identify(hw);
+
+	/* Overwrite the link function pointers if copper PHY */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
+		mac->ops.get_link_capabilities =
+				ixgbe_get_copper_link_capabilities_generic;
+	}
+
+	switch (hw->phy.type) {
+	case ixgbe_phy_tn:
+		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+		phy->ops.check_link = ixgbe_check_phy_link_tnx;
+		phy->ops.get_firmware_version =
+					ixgbe_get_phy_firmware_version_tnx;
+		break;
+	case ixgbe_phy_nl:
+		phy->ops.reset = ixgbe_reset_phy_nl;
+
+		/* Call SFP+ identify routine to get the SFP+ module type */
+		ret_val = phy->ops.identify_sfp(hw);
+		if (ret_val != IXGBE_SUCCESS)
+			goto out;
+		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+
+		/* Check to see if SFP+ module is supported */
+		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+							      &list_offset,
+							      &data_offset);
+		if (ret_val != IXGBE_SUCCESS) {
+			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+		break;
+	default:
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function.
+ *  Disables relaxed ordering, then sets the PCIe completion timeout.
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+	u32 regval;
+	u32 i;
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_start_hw_82598");
+
+	ret_val = ixgbe_start_hw_generic(hw);
+
+	/* Disable relaxed ordering */
+	for (i = 0; ((i < hw->mac.max_tx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+	}
+
+	for (i = 0; ((i < hw->mac.max_rx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+	/* set the completion timeout for interface */
+	if (ret_val == IXGBE_SUCCESS)
+		ixgbe_set_pcie_completion_timeout(hw);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+					     ixgbe_link_speed *speed,
+					     bool *autoneg)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 autoc = 0;
+
+	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
+
+	/*
+	 * Determine link capabilities based on the stored value of AUTOC,
+	 * which represents EEPROM defaults.  If AUTOC value has not been
+	 * stored, use the current register value.
+	 */
+	if (hw->mac.orig_link_settings_stored)
+		autoc = hw->mac.orig_autoc;
+	else
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_1G_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_KX4_AN:
+	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	default:
+		status = IXGBE_ERR_LINK_SETUP;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82598 - Determines media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	DEBUGFUNC("ixgbe_get_media_type_82598");
+
+	/* Detect if there is a copper PHY attached. */
+	switch (hw->phy.type) {
+	case ixgbe_phy_cu_unknown:
+	case ixgbe_phy_tn:
+		media_type = ixgbe_media_type_copper;
+		goto out;
+	default:
+		break;
+	}
+
+	/* Media type for I82598 is based on device ID */
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598:
+	case IXGBE_DEV_ID_82598_BX:
+		/* Default device ID is mezzanine card KX/KX4 */
+		media_type = ixgbe_media_type_backplane;
+		break;
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+	case IXGBE_DEV_ID_82598EB_SFP_LOM:
+		media_type = ixgbe_media_type_fiber;
+		break;
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+		media_type = ixgbe_media_type_cx4;
+		break;
+	case IXGBE_DEV_ID_82598AT:
+	case IXGBE_DEV_ID_82598AT2:
+		media_type = ixgbe_media_type_copper;
+		break;
+	default:
+		media_type = ixgbe_media_type_unknown;
+		break;
+	}
+out:
+	return media_type;
+}
+
+/**
+ *  ixgbe_fc_enable_82598 - Enable flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 fctrl_reg;
+	u32 rmcs_reg;
+	u32 reg;
+	u32 fcrtl, fcrth;
+	u32 link_speed = 0;
+	int i;
+	bool link_up;
+
+	DEBUGFUNC("ixgbe_fc_enable_82598");
+
+	/* Validate the water mark configuration */
+	if (!hw->fc.pause_time) {
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/* Low water mark of zero causes XOFF floods */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				DEBUGOUT("Invalid water mark configuration\n");
+				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
+	/*
+	 * On 82598, having Rx FC on causes resets while doing 1G,
+	 * so if it's on, turn it off once we know link_speed. For
+	 * more details see 82598 Specification update.
+	 */
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+		switch (hw->fc.requested_mode) {
+		case ixgbe_fc_full:
+			hw->fc.requested_mode = ixgbe_fc_tx_pause;
+			break;
+		case ixgbe_fc_rx_pause:
+			hw->fc.requested_mode = ixgbe_fc_none;
+			break;
+		default:
+			/* no change */
+			break;
+		}
+	}
+
+	/* Negotiate the fc mode to use */
+	ixgbe_fc_autoneg(hw);
+
+	/* Disable any previous flow control settings */
+	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+	/*
+	 * The possible values of fc.current_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *     we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.current_mode) {
+	case ixgbe_fc_none:
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		fctrl_reg |= IXGBE_FCTRL_RFCE;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		fctrl_reg |= IXGBE_FCTRL_RFCE;
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	/* Set 802.3x based flow control settings. */
+	fctrl_reg |= IXGBE_FCTRL_DPF;
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+		}
+
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_start_mac_link_82598 - Configures MAC link settings
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ **/
+STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+				      bool autoneg_wait_to_complete)
+{
+	u32 autoc_reg;
+	u32 links_reg;
+	u32 i;
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_start_mac_link_82598");
+
+	/* Restart link */
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+	/* Only poll for autoneg to complete if specified to do so */
+	if (autoneg_wait_to_complete) {
+		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+		     IXGBE_AUTOC_LMS_KX4_AN ||
+		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+			links_reg = 0; /* Just in case Autoneg time = 0 */
+			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+					break;
+				msec_delay(100);
+			}
+			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+				DEBUGOUT("Autonegotiation did not complete.\n");
+			}
+		}
+	}
+
+	/* Add delay to filter out noises during initial link setup */
+	msec_delay(50);
+
+	return status;
+}
+
+/**
+ *  ixgbe_validate_link_ready - Function looks for phy link
+ *  @hw: pointer to hardware structure
+ *
+ *  Function indicates success when phy link is available. If phy is not ready
+ *  within 5 seconds of MAC indicating link, the function returns error.
+ **/
+STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+	u32 timeout;
+	u16 an_reg;
+
+	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+		return IXGBE_SUCCESS;
+
+	for (timeout = 0;
+	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+			break;
+
+		msec_delay(100);
+	}
+
+	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+		DEBUGOUT("Link was indicated but link is down\n");
+		return IXGBE_ERR_LINK_SETUP;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_82598 - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+				      ixgbe_link_speed *speed, bool *link_up,
+				      bool link_up_wait_to_complete)
+{
+	u32 links_reg;
+	u32 i;
+	u16 link_reg, adapt_comp_reg;
+
+	DEBUGFUNC("ixgbe_check_mac_link_82598");
+
+	/*
+	 * SERDES PHY requires us to read link status from undocumented
+	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
+	 * indicates link down.  0xC00C is read to check that the XAUI lanes
+	 * are active.  Bit 0 clear indicates active; set indicates inactive.
+	 */
+	if (hw->phy.type == ixgbe_phy_nl) {
+		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+				     &adapt_comp_reg);
+		if (link_up_wait_to_complete) {
+			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+				if ((link_reg & 1) &&
+				    ((adapt_comp_reg & 1) == 0)) {
+					*link_up = true;
+					break;
+				} else {
+					*link_up = false;
+				}
+				msec_delay(100);
+				hw->phy.ops.read_reg(hw, 0xC79F,
+						     IXGBE_TWINAX_DEV,
+						     &link_reg);
+				hw->phy.ops.read_reg(hw, 0xC00C,
+						     IXGBE_TWINAX_DEV,
+						     &adapt_comp_reg);
+			}
+		} else {
+			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+				*link_up = true;
+			else
+				*link_up = false;
+		}
+
+		if (*link_up == false)
+			goto out;
+	}
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msec_delay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
+
+	if (links_reg & IXGBE_LINKS_SPEED)
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+		*link_up = false;
+
+out:
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82598 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ **/
+STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+				      ixgbe_link_speed speed,
+				      bool autoneg_wait_to_complete)
+{
+	bool autoneg = false;
+	s32 status = IXGBE_SUCCESS;
+	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 autoc = curr_autoc;
+	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+	DEBUGFUNC("ixgbe_setup_mac_link_82598");
+
+	/* Check to see if speed passed in is supported. */
+	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+	speed &= link_capabilities;
+
+	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+		status = IXGBE_ERR_LINK_SETUP;
+
+	/* Set KX4/KX support according to speed requested */
+	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+			autoc |= IXGBE_AUTOC_KX4_SUPP;
+		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+			autoc |= IXGBE_AUTOC_KX_SUPP;
+		if (autoc != curr_autoc)
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		/*
+		 * Setup and restart the link based on the new values in
+		 * ixgbe_hw This will write the AUTOC register based on the new
+		 * stored values
+		 */
+		status = ixgbe_start_mac_link_82598(hw,
+						    autoneg_wait_to_complete);
+	}
+
+	return status;
+}
+
+
+/**
+ *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ *  Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_setup_copper_link_82598");
+
+	/* Setup the PHY according to input speed */
+	status = hw->phy.ops.setup_link_speed(hw, speed,
+					      autoneg_wait_to_complete);
+	/* Set up MAC */
+	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82598 - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ *  reset.
+ **/
+STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	s32 phy_status = IXGBE_SUCCESS;
+	u32 ctrl;
+	u32 gheccr;
+	u32 i;
+	u32 autoc;
+	u8  analog_val;
+
+	DEBUGFUNC("ixgbe_reset_hw_82598");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != IXGBE_SUCCESS)
+		goto reset_hw_out;
+
+	/*
+	 * Power up the Atlas Tx lanes if they are currently powered down.
+	 * Atlas Tx lanes are powered down for MAC loopback tests, but
+	 * they are not automatically restored on reset.
+	 */
+	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+		/* Enable Tx Atlas so packets can be transmitted again */
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+					      analog_val);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					     &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+					      analog_val);
+	}
+
+	/* Reset PHY */
+	if (hw->phy.reset_disable == false) {
+		/* PHY ops must be identified and initialized prior to reset */
+
+		/* Init PHY and function pointers, perform SFP setup */
+		phy_status = hw->phy.ops.init(hw);
+		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+			goto reset_hw_out;
+		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+			goto mac_reset_top;
+
+		hw->phy.ops.reset(hw);
+	}
+
+mac_reset_top:
+	/*
+	 * Issue global reset to the MAC.  This needs to be a SW reset.
+	 * If link reset is used, it might reset the MAC when mng is using it
+	 */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		usec_delay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST))
+			break;
+	}
+	if (ctrl & IXGBE_CTRL_RST) {
+		status = IXGBE_ERR_RESET_FAILED;
+		DEBUGOUT("Reset polling failed to complete.\n");
+	}
+
+	msec_delay(50);
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		goto mac_reset_top;
+	}
+
+	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+	/*
+	 * Store the original AUTOC value if it has not been
+	 * stored off yet.  Otherwise restore the stored original
+	 * AUTOC value since the reset operation sets back to defaults.
+	 */
+	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	if (hw->mac.orig_link_settings_stored == false) {
+		hw->mac.orig_autoc = autoc;
+		hw->mac.orig_link_settings_stored = true;
+	} else if (autoc != hw->mac.orig_autoc) {
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+	}
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table
+	 */
+	hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+	if (phy_status != IXGBE_SUCCESS)
+		status = phy_status;
+
+	return status;
+}
+
+/**
+ *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_set_vmdq_82598");
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		DEBUGOUT1("RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	rar_high &= ~IXGBE_RAH_VIND_MASK;
+	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	UNREFERENCED_1PARAMETER(vmdq);
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		DEBUGOUT1("RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+	if (rar_high & IXGBE_RAH_VIND_MASK) {
+		rar_high &= ~IXGBE_RAH_VIND_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vfta_82598 - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			 bool vlan_on)
+{
+	u32 regindex;
+	u32 bitindex;
+	u32 bits;
+	u32 vftabyte;
+
+	DEBUGFUNC("ixgbe_set_vfta_82598");
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/* Determine 32-bit word position in array */
+	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
+	/* Set the nibble for VMD queue index */
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+	bits &= (~(0x0F << bitindex));
+	bits |= (vind << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+	/* Determine the location of the bit for this VLAN id */
+	bitindex = vlan & 0x1F;   /* lower five bits */
+
+	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		bits |= (1 << bitindex);
+	else
+		/* Turn off this VLAN id */
+		bits &= ~(1 << bitindex);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	DEBUGFUNC("ixgbe_clear_vfta_82598");
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < hw->mac.vft_size; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+					0);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32  atlas_ctl;
+
+	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32  atlas_ctl;
+
+	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @dev_addr: address to read from
+ *  @byte_offset: byte offset to read from dev_addr
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+				    u8 byte_offset, u8 *eeprom_data)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 sfp_addr = 0;
+	u16 sfp_data = 0;
+	u16 sfp_stat = 0;
+	u16 gssr;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
+
+	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+		gssr = IXGBE_GSSR_PHY1_SM;
+	else
+		gssr = IXGBE_GSSR_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+		return IXGBE_ERR_SWFW_SYNC;
+
+	if (hw->phy.type == ixgbe_phy_nl) {
+		/*
+		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+		 * 0xC30D. These registers are used to talk to the SFP+
+		 * module's EEPROM through the SDA/SCL (I2C) interface.
+		 */
+		sfp_addr = (dev_addr << 8) + byte_offset;
+		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+		hw->phy.ops.write_reg_mdi(hw,
+					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+					  sfp_addr);
+
+		/* Poll status */
+		for (i = 0; i < 100; i++) {
+			hw->phy.ops.read_reg_mdi(hw,
+						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+						&sfp_stat);
+			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+				break;
+			msec_delay(10);
+		}
+
+		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+			DEBUGOUT("EEPROM read did not pass.\n");
+			status = IXGBE_ERR_SFP_NOT_PRESENT;
+			goto out;
+		}
+
+		/* Read data */
+		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+		*eeprom_data = (u8)(sfp_data >> 8);
+	} else {
+		status = IXGBE_ERR_PHY;
+	}
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
+	return status;
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 *eeprom_data)
+{
+	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+					byte_offset, eeprom_data);
+}
+
+/**
+ *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 *sff8472_data)
+{
+	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+					byte_offset, sff8472_data);
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+	u16 ext_ability = 0;
+
+	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
+
+	hw->phy.ops.identify(hw);
+
+	/* Copper PHY must be checked before AUTOC LMS to determine correct
+	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+	switch (hw->phy.type) {
+	case ixgbe_phy_tn:
+	case ixgbe_phy_cu_unknown:
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+		goto out;
+	default:
+		break;
+	}
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_AN:
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		else
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+		break;
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		else /* XAUI */
+			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+		break;
+	case IXGBE_AUTOC_LMS_KX4_AN:
+	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		break;
+	default:
+		break;
+	}
+
+	if (hw->phy.type == ixgbe_phy_nl) {
+		hw->phy.ops.identify_sfp(hw);
+
+		switch (hw->phy.sfp_type) {
+		case ixgbe_sfp_type_da_cu:
+			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+			break;
+		case ixgbe_sfp_type_sr:
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+			break;
+		case ixgbe_sfp_type_lr:
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+			break;
+		default:
+			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+			break;
+		}
+	}
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+		break;
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		break;
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
+	default:
+		break;
+	}
+
+out:
+	return physical_layer;
+}
+
+/**
+ *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ *  port devices.
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls common function and corrects issue with some single port devices
+ *  that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+	struct ixgbe_bus_info *bus = &hw->bus;
+	u16 pci_gen = 0;
+	u16 pci_ctrl2 = 0;
+
+	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
+
+	ixgbe_set_lan_id_multi_port_pcie(hw);
+
+	/* check if LAN0 is disabled */
+	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+		/* if LAN0 is completely disabled force function to 0 */
+		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+			bus->func = 0;
+		}
+	}
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
+ *  @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
+{
+	u32 regval;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
+
+	/* Enable relaxed ordering */
+	for (i = 0; ((i < hw->mac.max_tx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+	}
+
+	for (i = 0; ((i < hw->mac.max_rx_queues) &&
+	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+				  u32 headroom, int strategy)
+{
+	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+	u8 i = 0;
+	UNREFERENCED_1PARAMETER(headroom);
+
+	if (!num_pb)
+		return;
+
+	/* Setup Rx packet buffer sizes */
+	switch (strategy) {
+	case PBA_STRATEGY_WEIGHTED:
+		/* Setup the first four at 80KB */
+		rxpktsize = IXGBE_RXPBSIZE_80KB;
+		for (; i < 4; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+		/* Setup the last four at 48KB...don't re-init i */
+		rxpktsize = IXGBE_RXPBSIZE_48KB;
+		/* Fall Through */
+	case PBA_STRATEGY_EQUAL:
+	default:
+		/* Divide the remaining Rx packet buffer evenly among the TCs */
+		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+		break;
+	}
+
+	/* Setup Tx packet buffer sizes */
+	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
+
+/**
+ *  ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
+{
+	DEBUGFUNC("ixgbe_enable_rx_dma_82598");
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+	return IXGBE_SUCCESS;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
new file mode 100644
index 0000000..4000486
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -0,0 +1,52 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82598_H_
+#define _IXGBE_82598_H_
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
+#endif /* _IXGBE_82598_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
new file mode 100644
index 0000000..239b833
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -0,0 +1,2713 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82599.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES   128
+#define IXGBE_82599_MC_TBL_SIZE   128
+#define IXGBE_82599_VFT_TBL_SIZE  128
+#define IXGBE_82599_RX_PB_SIZE	  512
+
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete);
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+				   u16 offset, u16 *data);
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+					  u16 words, u16 *data);
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 dev_addr, u8 *data);
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 dev_addr, u8 data);
+
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+
+	/*
+	 * enable the laser control functions for SFP+ fiber
+	 * and MNG not enabled
+	 */
+	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+	    !ixgbe_mng_enabled(hw)) {
+		mac->ops.disable_tx_laser =
+				       ixgbe_disable_tx_laser_multispeed_fiber;
+		mac->ops.enable_tx_laser =
+					ixgbe_enable_tx_laser_multispeed_fiber;
+		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
+
+	} else {
+		mac->ops.disable_tx_laser = NULL;
+		mac->ops.enable_tx_laser = NULL;
+		mac->ops.flap_tx_laser = NULL;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+	} else {
+		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
+		} else {
+			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
+		}
+	}
+}
+
+/**
+ *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 esdp;
+
+	DEBUGFUNC("ixgbe_init_phy_ops_82599");
+
+	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+		/* Store flag indicating I2C bus access control unit. */
+		hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+		/* Initialize access to QSFP+ I2C bus */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0_DIR;
+		esdp &= ~IXGBE_ESDP_SDP1_DIR;
+		esdp &= ~IXGBE_ESDP_SDP0;
+		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
+		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
+	}
+	/* Identify the PHY or SFP module */
+	ret_val = phy->ops.identify(hw);
+	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto init_phy_ops_out;
+
+	/* Setup function pointers based on detected SFP module and speeds */
+	ixgbe_init_mac_link_ops_82599(hw);
+	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+		hw->phy.ops.reset = NULL;
+
+	/* If copper media, overwrite with copper function pointers */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
+		mac->ops.get_link_capabilities =
+				  ixgbe_get_copper_link_capabilities_generic;
+	}
+
+	/* Set necessary function pointers based on PHY type */
+	switch (hw->phy.type) {
+	case ixgbe_phy_tn:
+		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
+		phy->ops.check_link = ixgbe_check_phy_link_tnx;
+		phy->ops.get_firmware_version =
+			     ixgbe_get_phy_firmware_version_tnx;
+		break;
+	default:
+		break;
+	}
+init_phy_ops_out:
+	return ret_val;
+}
+
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+	u16 list_offset, data_offset, data_value;
+
+	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
+
+	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+		ixgbe_init_mac_link_ops_82599(hw);
+
+		hw->phy.ops.reset = NULL;
+
+		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+							      &data_offset);
+		if (ret_val != IXGBE_SUCCESS)
+			goto setup_sfp_out;
+
+		/* PHY config will finish before releasing the semaphore */
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS) {
+			ret_val = IXGBE_ERR_SWFW_SYNC;
+			goto setup_sfp_out;
+		}
+
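+		/* Write each CORECTL value from the SFP init sequence in the
+		 * EEPROM until the 0xffff end-of-list marker is reached.
+		 */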
+		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+			goto setup_sfp_err;
+		while (data_value != 0xffff) {
+			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+			IXGBE_WRITE_FLUSH(hw);
+			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+				goto setup_sfp_err;
+		}
+
+		/* Release the semaphore */
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+		/* Delay obtaining the semaphore again to allow FW access;
+		 * prot_autoc_write uses the semaphore too.
+		 */
+		msec_delay(hw->eeprom.semaphore_delay);
+
+		/* Restart DSP and set SFI mode */
+		ret_val = hw->mac.ops.prot_autoc_write(hw,
+			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+			false);
+
+		if (ret_val) {
+			DEBUGOUT("sfp module setup not complete\n");
+			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+			goto setup_sfp_out;
+		}
+
+	}
+
+setup_sfp_out:
+	return ret_val;
+
+setup_sfp_err:
+	/* Release the semaphore */
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+	/* Delay obtaining semaphore again to allow FW access */
+	msec_delay(hw->eeprom.semaphore_delay);
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", data_offset);
+	return IXGBE_ERR_PHY;
+}
+
+/**
+ *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ *  @hw: pointer to hardware structure
+ *  @locked: Return true if we took the SW/FW lock for this read.
+ *  @reg_val: Value we read from AUTOC
+ *
+ *  For this part (82599) we need to wrap read-modify-writes with a possible
+ *  FW/SW lock.  It is assumed this lock will be freed with the next
+ *  prot_autoc_write_82599().
+ */
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+	s32 ret_val;
+
+	*locked = false;
+	 /* If LESM is on then we need to hold the SW/FW semaphore. */
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+					IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS)
+			return IXGBE_ERR_SWFW_SYNC;
+
+		*locked = true;
+	}
+
+	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @autoc: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *           a previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
+	/* We only need to get the lock if:
+	 *  - We didn't do it already (in the read part of a read-modify-write)
+	 *  - LESM is enabled.
+	 */
+	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+					IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS)
+			return IXGBE_ERR_SWFW_SYNC;
+
+		locked = true;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+	ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+	/* Free the SW/FW semaphore as we either grabbed it here or
+	 * already had it when this function was called.
+	 */
+	if (locked)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for 82599.
+ *  Does not touch the hardware.
+ **/
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_init_ops_82599");
+
+	ixgbe_init_phy_ops_generic(hw);
+	ret_val = ixgbe_init_ops_generic(hw);
+
+	/* PHY */
+	phy->ops.identify = ixgbe_identify_phy_82599;
+	phy->ops.init = ixgbe_init_phy_ops_82599;
+
+	/* MAC */
+	mac->ops.reset_hw = ixgbe_reset_hw_82599;
+	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+	mac->ops.get_media_type = ixgbe_get_media_type_82599;
+	mac->ops.get_supported_physical_layer =
+				    ixgbe_get_supported_physical_layer_82599;
+	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
+	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
+	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
+	mac->ops.start_hw = ixgbe_start_hw_82599;
+	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+	mac->ops.prot_autoc_read = prot_autoc_read_82599;
+	mac->ops.prot_autoc_write = prot_autoc_write_82599;
+
+	/* RAR, Multicast, VLAN */
+	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
+	mac->rar_highwater = 1;
+	mac->ops.set_vfta = ixgbe_set_vfta_generic;
+	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
+	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
+
+	/* Link */
+	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
+	mac->ops.check_link = ixgbe_check_mac_link_generic;
+	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+	ixgbe_init_mac_link_ops_82599(hw);
+
+	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
+	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
+	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
+	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
+	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
+	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
+	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
+
+	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+				   IXGBE_FWSM_MODE_MASK) ? true : false;
+
+	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+	/* EEPROM */
+	eeprom->ops.read = ixgbe_read_eeprom_82599;
+	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
+
+	/* Manageability interface */
+	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
+
+	mac->ops.get_thermal_sensor_data =
+					 ixgbe_get_thermal_sensor_data_generic;
+	mac->ops.init_thermal_sensor_thresh =
+				      ixgbe_init_thermal_sensor_thresh_generic;
+
+	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: true when autoneg or autotry is enabled
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+				      ixgbe_link_speed *speed,
+				      bool *autoneg)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 autoc = 0;
+
+	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+
+
+	/* Check if 1G SFP module. */
+	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		goto out;
+	}
+
+	/*
+	 * Determine link capabilities based on the stored value of AUTOC,
+	 * which represents EEPROM defaults.  If AUTOC value has not
+	 * been stored, use the current register values.
+	 */
+	if (hw->mac.orig_link_settings_stored)
+		autoc = hw->mac.orig_autoc;
+	else
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_1G_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_10G_SERIAL:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_KX4_KX_KR:
+	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+		*autoneg = false;
+		break;
+
+	default:
+		status = IXGBE_ERR_LINK_SETUP;
+		goto out;
+		break;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
+			  IXGBE_LINK_SPEED_1GB_FULL;
+
+		/* QSFP must not enable full auto-negotiation;
+		 * limited autoneg is enabled at 1G.
+		 */
+		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+			*autoneg = false;
+		else
+			*autoneg = true;
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82599 - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	DEBUGFUNC("ixgbe_get_media_type_82599");
+
+	/* Detect if there is a copper PHY attached. */
+	switch (hw->phy.type) {
+	case ixgbe_phy_cu_unknown:
+	case ixgbe_phy_tn:
+		media_type = ixgbe_media_type_copper;
+		goto out;
+	default:
+		break;
+	}
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_KX4:
+	case IXGBE_DEV_ID_82599_KX4_MEZZ:
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+	case IXGBE_DEV_ID_82599_KR:
+	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+	case IXGBE_DEV_ID_82599_XAUI_LOM:
+		/* Default device ID is mezzanine card KX/KX4 */
+		media_type = ixgbe_media_type_backplane;
+		break;
+	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_SFP_FCOE:
+	case IXGBE_DEV_ID_82599_SFP_EM:
+	case IXGBE_DEV_ID_82599_SFP_SF2:
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
+	case IXGBE_DEV_ID_82599EN_SFP:
+		media_type = ixgbe_media_type_fiber;
+		break;
+	case IXGBE_DEV_ID_82599_CX4:
+		media_type = ixgbe_media_type_cx4;
+		break;
+	case IXGBE_DEV_ID_82599_T3_LOM:
+		media_type = ixgbe_media_type_copper;
+		break;
+	case IXGBE_DEV_ID_82599_LS:
+		media_type = ixgbe_media_type_fiber_lco;
+		break;
+	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+		media_type = ixgbe_media_type_fiber_qsfp;
+		break;
+	default:
+		media_type = ixgbe_media_type_unknown;
+		break;
+	}
+out:
+	return media_type;
+}
+
+/**
+ *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables link during D3 power down sequence.
+ *
+ **/
+void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+	u32 autoc2_reg;
+	u16 ee_ctrl_2 = 0;
+
+	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
+	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+
+	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
+	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
+		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+	}
+}
+
+/**
+ *  ixgbe_start_mac_link_82599 - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ **/
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+			       bool autoneg_wait_to_complete)
+{
+	u32 autoc_reg;
+	u32 links_reg;
+	u32 i;
+	s32 status = IXGBE_SUCCESS;
+	bool got_lock = false;
+
+	DEBUGFUNC("ixgbe_start_mac_link_82599");
+
+
+	/*  reset_pipeline requires us to hold this lock as it writes to
+	 *  AUTOC.
+	 */
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		status = hw->mac.ops.acquire_swfw_sync(hw,
+						       IXGBE_GSSR_MAC_CSR_SM);
+		if (status != IXGBE_SUCCESS)
+			goto out;
+
+		got_lock = true;
+	}
+
+	/* Restart link */
+	ixgbe_reset_pipeline_82599(hw);
+
+	if (got_lock)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+	/* Only poll for autoneg to complete if specified to do so */
+	if (autoneg_wait_to_complete) {
+		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
+		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+			links_reg = 0; /* Just in case Autoneg time = 0 */
+			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+					break;
+				msec_delay(100);
+			}
+			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+				DEBUGOUT("Autoneg did not complete.\n");
+			}
+		}
+	}
+
+	/* Add delay to filter out noise during initial link setup */
+	msec_delay(50);
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		return;
+
+	/* Disable Tx laser; allow 100us to go dark per spec */
+	esdp_reg |= IXGBE_ESDP_SDP3;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(100);
+}
+
+/**
+ *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively turning on the Tx
+ *  laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	/* Enable Tx laser; allow 100ms to light up */
+	esdp_reg &= ~IXGBE_ESDP_SDP3;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(100);
+}
+
+/**
+ *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support,
+ *  it sets autotry_restart to true to indicate that we need to
+ *  initiate a new autotry session with the link partner.  To do
+ *  so, we set the speed then disable and re-enable the Tx laser, to
+ *  alert the link partner that it also needs to restart autotry on its
+ *  end.  This is consistent with true clause 37 autoneg, which also
+ *  involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		return;
+
+	if (hw->mac.autotry_restart) {
+		ixgbe_disable_tx_laser_multispeed_fiber(hw);
+		ixgbe_enable_tx_laser_multispeed_fiber(hw);
+		hw->mac.autotry_restart = false;
+	}
+}
+
+
+/**
+ *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Sets the link speed in the AUTOC register and restarts the link.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed,
+				     bool autoneg_wait_to_complete)
+{
+	s32 status = IXGBE_SUCCESS;
+	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	u32 speedcnt = 0;
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	u32 i = 0;
+	bool autoneg, link_up = false;
+
+	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+	/* Mask off requested but non-supported speeds */
+	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	speed &= link_speed;
+
+	/*
+	 * Try each speed one by one, highest priority first.  We do this in
+	 * software because 10gb fiber doesn't support speed autonegotiation.
+	 */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+		speedcnt++;
+		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+			goto out;
+
+		/* Set the module link speed */
+		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber:
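+			/* Drive SDP5 high to select the module's 10G rate (rate select). */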
+			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			IXGBE_WRITE_FLUSH(hw);
+			break;
+		case ixgbe_media_type_fiber_qsfp:
+			/* QSFP module automatically detects MAC link speed */
+			break;
+		default:
+			DEBUGOUT("Unexpected media type.\n");
+			break;
+		}
+
+		/* Allow module to change analog characteristics (1G->10G) */
+		msec_delay(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+						    IXGBE_LINK_SPEED_10GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* Flap the tx laser if it has not already been done */
+		ixgbe_flap_tx_laser(hw);
+
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  82599 uses the same timing for 10g SFI.
+		 */
+		for (i = 0; i < 5; i++) {
+			/* Wait for the link partner to also set speed */
+			msec_delay(100);
+
+			/* If we have link, just jump out */
+			status = ixgbe_check_link(hw, &link_speed,
+						  &link_up, false);
+			if (status != IXGBE_SUCCESS)
+				return status;
+
+			if (link_up)
+				goto out;
+		}
+	}
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+		speedcnt++;
+		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+			goto out;
+
+		/* Set the module link speed */
+		switch (hw->phy.media_type) {
+		case ixgbe_media_type_fiber:
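+			/* Drive SDP5 low to select the module's 1G rate (rate select). */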
+			esdp_reg &= ~IXGBE_ESDP_SDP5;
+			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			IXGBE_WRITE_FLUSH(hw);
+			break;
+		case ixgbe_media_type_fiber_qsfp:
+			/* QSFP module automatically detects link speed */
+			break;
+		default:
+			DEBUGOUT("Unexpected media type.\n");
+			break;
+		}
+
+		/* Allow module to change analog characteristics (10G->1G) */
+		msec_delay(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+						    IXGBE_LINK_SPEED_1GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* Flap the Tx laser if it has not already been done */
+		ixgbe_flap_tx_laser(hw);
+
+		/* Wait for the link partner to also set speed */
+		msec_delay(100);
+
+		/* If we have link, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if (link_up)
+			goto out;
+	}
+
+	/*
+	 * We didn't get link.  Configure back to the highest speed we tried
+	 * (if there was more than one).  We call ourselves back with just the
+	 * single highest speed that the user requested.
+	 */
+	if (speedcnt > 1)
+		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+			highest_link_speed, autoneg_wait_to_complete);
+
+out:
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Implements the Intel SmartSpeed algorithm.
+ **/
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+				    ixgbe_link_speed speed,
+				    bool autoneg_wait_to_complete)
+{
+	s32 status = IXGBE_SUCCESS;
+	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	s32 i, j;
+	bool link_up = false;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
+
+	 /* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+	/*
+	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
+	 * autoneg advertisement if link is unable to be established at the
+	 * highest negotiated rate.  This can sometimes happen due to integrity
+	 * issues with the physical media connection.
+	 */
+
+	/* First, try to get link with full advertisement */
+	hw->phy.smart_speed_active = false;
+	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+		status = ixgbe_setup_mac_link_82599(hw, speed,
+						    autoneg_wait_to_complete);
+		if (status != IXGBE_SUCCESS)
+			goto out;
+
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+		 * Table 9 in the AN MAS.
+		 */
+		for (i = 0; i < 5; i++) {
+			msec_delay(100);
+
+			/* If we have link, just jump out */
+			status = ixgbe_check_link(hw, &link_speed, &link_up,
+						  false);
+			if (status != IXGBE_SUCCESS)
+				goto out;
+
+			if (link_up)
+				goto out;
+		}
+	}
+
+	/*
+	 * We didn't get link.  If we advertised KR plus one of KX4/KX
+	 * (or BX4/BX), then disable KR and try again.
+	 */
+	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+		goto out;
+
+	/* Turn SmartSpeed on to disable KR support */
+	hw->phy.smart_speed_active = true;
+	status = ixgbe_setup_mac_link_82599(hw, speed,
+					    autoneg_wait_to_complete);
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	/*
+	 * Wait for the controller to acquire link.  600ms will allow for
+	 * the AN link_fail_inhibit_timer as well for multiple cycles of
+	 * parallel detect, both 10g and 1g. This allows for the maximum
+	 * connect attempts as defined in the AN MAS table 73-7.
+	 */
+	for (i = 0; i < 6; i++) {
+		msec_delay(100);
+
+		/* If we have link, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			goto out;
+
+		if (link_up)
+			goto out;
+	}
+
+	/* We didn't get link.  Turn SmartSpeed back off. */
+	hw->phy.smart_speed_active = false;
+	status = ixgbe_setup_mac_link_82599(hw, speed,
+					    autoneg_wait_to_complete);
+
+out:
+	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+		DEBUGOUT("Smartspeed has downgraded the link speed "
+		"from the maximum advertised\n");
+	return status;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82599 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Sets the link speed in the AUTOC register and restarts the link.
+ **/
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+			       ixgbe_link_speed speed,
+			       bool autoneg_wait_to_complete)
+{
+	bool autoneg = false;
+	s32 status = IXGBE_SUCCESS;
+	u32 pma_pmd_1g, link_mode;
+	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
+	u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
+	u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
+	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+	u32 links_reg;
+	u32 i;
+	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+	DEBUGFUNC("ixgbe_setup_mac_link_82599");
+
+	/* Check to see if speed passed in is supported. */
+	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+	if (status)
+		goto out;
+
+	speed &= link_capabilities;
+
+	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+		status = IXGBE_ERR_LINK_SETUP;
+		goto out;
+	}
+
+	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+	if (hw->mac.orig_link_settings_stored)
+		orig_autoc = hw->mac.orig_autoc;
+	else
+		orig_autoc = autoc;
+
+	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+
+	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+		/* Set KX4/KX/KR support according to speed requested */
+		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+				autoc |= IXGBE_AUTOC_KX4_SUPP;
+			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+			    (hw->phy.smart_speed_active == false))
+				autoc |= IXGBE_AUTOC_KR_SUPP;
+		}
+		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+			autoc |= IXGBE_AUTOC_KX_SUPP;
+	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+		/* Switch from 1G SFI to 10G SFI if requested */
+		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+			autoc &= ~IXGBE_AUTOC_LMS_MASK;
+			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+		}
+	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+		/* Switch from 10G SFI to 1G SFI if requested */
+		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+			autoc &= ~IXGBE_AUTOC_LMS_MASK;
+			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
+				autoc |= IXGBE_AUTOC_LMS_1G_AN;
+			else
+				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+		}
+	}
+
+	if (autoc != current_autoc) {
+		/* Restart link */
+		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+		if (status != IXGBE_SUCCESS)
+			goto out;
+
+		/* Only poll for autoneg to complete if specified to do so */
+		if (autoneg_wait_to_complete) {
+			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+				links_reg = 0; /*Just in case Autoneg time=0*/
+				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+					links_reg =
+					       IXGBE_READ_REG(hw, IXGBE_LINKS);
+					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+						break;
+					msec_delay(100);
+				}
+				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+					status =
+						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+					DEBUGOUT("Autoneg did not complete.\n");
+				}
+			}
+		}
+
+		/* Add delay to filter out noise during initial link setup */
+		msec_delay(50);
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ *  Restarts link on PHY and MAC based on settings passed in.
+ **/
+STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+					 ixgbe_link_speed speed,
+					 bool autoneg_wait_to_complete)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_setup_copper_link_82599");
+
+	/* Setup the PHY according to input speed */
+	status = hw->phy.ops.setup_link_speed(hw, speed,
+					      autoneg_wait_to_complete);
+	/* Set up MAC */
+	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82599 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ *  reset.
+ **/
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+	ixgbe_link_speed link_speed;
+	s32 status;
+	u32 ctrl = 0;
+	u32 i, autoc, autoc2;
+	u32 curr_lms;
+	bool link_up = false;
+
+	DEBUGFUNC("ixgbe_reset_hw_82599");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != IXGBE_SUCCESS)
+		goto reset_hw_out;
+
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);
+
+	/* PHY ops must be identified and initialized prior to reset */
+
+	/* Identify PHY and related function pointers */
+	status = hw->phy.ops.init(hw);
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Setup SFP module if there is one present. */
+	if (hw->phy.sfp_setup_needed) {
+		status = hw->mac.ops.setup_sfp(hw);
+		hw->phy.sfp_setup_needed = false;
+	}
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Reset PHY */
+	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+		hw->phy.ops.reset(hw);
+
+	/* remember AUTOC from before we reset */
+	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
+
+mac_reset_top:
+	/*
+	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
+	 * If link reset is used when link is up, it might reset the PHY when
+	 * mng is using it.  If link is down or the flag to force full link
+	 * reset is set, then perform link reset.
+	 */
+	ctrl = IXGBE_CTRL_LNK_RST;
+	if (!hw->force_full_reset) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (link_up)
+			ctrl = IXGBE_CTRL_RST;
+	}
+
+	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear meaning reset is complete */
+	for (i = 0; i < 10; i++) {
+		usec_delay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST_MASK))
+			break;
+	}
+
+	if (ctrl & IXGBE_CTRL_RST_MASK) {
+		status = IXGBE_ERR_RESET_FAILED;
+		DEBUGOUT("Reset polling failed to complete.\n");
+	}
+
+	msec_delay(50);
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to
+	 * allow time for any pending HW events to complete.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		goto mac_reset_top;
+	}
+
+	/*
+	 * Store the original AUTOC/AUTOC2 values if they have not been
+	 * stored off yet.  Otherwise restore the stored original
+	 * values since the reset operation sets back to defaults.
+	 */
+	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+	/* Enable link if disabled in NVM */
+	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	if (hw->mac.orig_link_settings_stored == false) {
+		hw->mac.orig_autoc = autoc;
+		hw->mac.orig_autoc2 = autoc2;
+		hw->mac.orig_link_settings_stored = true;
+	} else {
+
+		/* If MNG FW is running on a multi-speed device that
+		 * doesn't autoneg without driver support, we need to
+		 * leave LMS in the state it was in before the MAC reset.
+		 * Likewise, if we support WoL we don't want to change
+		 * the LMS state.
+		 */
+		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
+		    hw->wol_enabled)
+			hw->mac.orig_autoc =
+				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+				curr_lms;
+
+		if (autoc != hw->mac.orig_autoc) {
+			status = hw->mac.ops.prot_autoc_write(hw,
+							hw->mac.orig_autoc,
+							false);
+			if (status != IXGBE_SUCCESS)
+				goto reset_hw_out;
+		}
+
+		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+			autoc2 |= (hw->mac.orig_autoc2 &
+				   IXGBE_AUTOC2_UPPER_MASK);
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+		}
+	}
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+	/* Store the permanent SAN mac address */
+	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+				   &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+	return status;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+	int i;
+
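+	/* Poll until the previously issued flow director command completes
+	 * (the CMD field of FDIRCMD clears).
+	 */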
+	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+			return IXGBE_SUCCESS;
+		usec_delay(10);
+	}
+
+	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+	s32 err;
+	int i;
+	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	u32 fdircmd;
+	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
+
+	/*
+	 * Before starting reinitialization process,
+	 * FDIRCMD.CMD must be zero.
+	 */
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+		return err;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+	IXGBE_WRITE_FLUSH(hw);
+	/*
+	 * The 82599 adapter's flow director init flow cannot be restarted.
+	 * Work around the 82599 silicon errata by performing the following
+	 * steps before re-writing the FDIRCTRL control register with the
+	 * same value:
+	 * - write 1 to bit 8 of the FDIRCMD register, then
+	 * - write 0 to bit 8 of the FDIRCMD register
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+			 IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+			 ~IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+	/*
+	 * Clear FDIR Hash register to clear any leftover hashes
+	 * waiting to be programmed.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+	IXGBE_WRITE_FLUSH(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll init-done after we write FDIRCTRL register */
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+				   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	}
+
+	/* Clear FDIR statistics registers (read to clear) */
+	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ **/
+STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+	int i;
+
+	DEBUGFUNC("ixgbe_fdir_enable_82599");
+
+	/* Prime the keys for hashing */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/*
+	 * Poll init-done after we write the register.  Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+				   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+		DEBUGOUT("Flow Director poll time exceeded!\n");
+}
+
+/**
+ *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register, initially
+ *	     contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Move the flexible bytes to use the ethertype - shift 6 words
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 filters are left
+	 */
+	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register, initially
+ *	     contains just the value of the Rx packet buffer allocation
+ *  @cloud_mode: true - cloud mode, false - other mode
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+			bool cloud_mode)
+{
+	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Turn perfect match filtering on
+	 *  Report hash in RSS field of Rx wb descriptor
+	 *  Initialize the drop queue
+	 *  Move the flexible bytes to use the ethertype - shift 6 words
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 (0x4 * 16) filters are left
+	 */
+	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+		    IXGBE_FDIRCTRL_REPORT_STATUS |
+		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	if (cloud_mode)
+		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
+					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
+
+	/* write hashes and fdirctrl register, poll for completion */
+	ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+	return IXGBE_SUCCESS;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ *  @input: unique input dword
+ *  @common: compressed common input dword
+ *
+ *  This function is almost identical to the function above but contains
+ *  several optimizations such as unwinding all of the loops, letting the
+ *  compiler work out all of the conditional ifs since the keys are static
+ *  defines, and computing two keys at once since the hashed dword stream
+ *  will be the same for both keys.
+ **/
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+				     union ixgbe_atr_hash_dword common)
+{
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = IXGBE_NTOHL(input.dword);
+
+	/* generate common hash dword */
+	hi_hash_dword = IXGBE_NTOHL(common.dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the VLAN until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
+}
+
+/**
+ *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ *  @hw: pointer to hardware structure
+ *  @input: unique input dword
+ *  @common: compressed common input dword
+ *  @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when the hardware
+ * tunneling support does not exist.
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_hash_dword input,
+					  union ixgbe_atr_hash_dword common,
+					  u8 queue)
+{
+	u64 fdirhashcmd;
+	u8 flow_type;
+	bool tunnel;
+	u32 fdircmd;
+	s32 err;
+
+	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
+
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly.
+	 * The lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
+	 * FDIRCMD.IPV6 and the fifth bit is FDIRCMD.TUNNEL_FILTER.
+	 */
+	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+	flow_type = input.formatted.flow_type &
+		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+	switch (flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		DEBUGOUT(" Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	if (tunnel)
+		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
+
+	/*
+	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+	 */
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		DEBUGOUT("Flow Director command did not complete!\n");
+		return err;
+	}
+
+	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+	return IXGBE_SUCCESS;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ *  @atr_input: input bitstream to compute the hash on
+ *  @input_mask: mask for the input bitstream
+ *
+ *  This function serves two main purposes.  First it applies the input_mask
+ *  to the atr_input resulting in a cleaned up atr_input data stream.
+ *  Secondly it computes the hash and stores it in the bkt_hash field at
+ *  the end of the input byte stream.  This way it will be available for
+ *  future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+					  union ixgbe_atr_input *input_mask)
+{
+
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 bucket_hash = 0;
+	u32 hi_dword = 0;
+	u32 i = 0;
+
+	/* Apply masks to input data */
+	for (i = 0; i < 14; i++)
+		input->dword_stream[i]  &= input_mask->dword_stream[i];
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+	/* generate common hash dword */
+	for (i = 1; i <= 13; i++)
+		hi_dword ^= input->dword_stream[i];
+	hi_hash_dword = IXGBE_NTOHL(hi_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the VLAN until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bits of the key */
+	for (i = 1; i <= 15; i++)
+		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+	/*
+	 * Limit hash to 13 bits since max bucket count is 8K.
+	 * Store result at the end of the input stream.
+	 */
+	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ *  ixgbe_get_fdirtcpm_82599 - generate a TCP/UDP port mask from atr_input_masks
+ *  @input_mask: mask to be bit swapped
+ *
+ *  The source and destination port masks for flow director are bit swapped
+ *  in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order
+ *  to generate a correctly swapped value we need to bit swap the mask and that
+ *  is what is accomplished by this function.
+ **/
+STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+				    union ixgbe_atr_input *input_mask, bool cloud_mode)
+{
+	/* mask IPv6 since it is currently not supported */
+	u32 fdirm = IXGBE_FDIRM_DIPv6;
+	u32 fdirtcpm;
+	u32 fdirip6m;
+	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
+
+	/*
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
+	 *
+	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
+	 * point in time.
+	 */
+
+	/* verify bucket hash is cleared on hash generation */
+	if (input_mask->formatted.bkt_hash)
+		DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+	/* Program FDIRM and verify partial masks */
+	switch (input_mask->formatted.vm_pool & 0x7F) {
+	case 0x0:
+		fdirm |= IXGBE_FDIRM_POOL;
+		/* fall through */
+	case 0x7F:
+		break;
+	default:
+		DEBUGOUT(" Error on vm pool mask\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+	case 0x0:
+		fdirm |= IXGBE_FDIRM_L4P;
+		if (input_mask->formatted.dst_port ||
+		    input_mask->formatted.src_port) {
+			DEBUGOUT(" Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* fall through */
+	case IXGBE_ATR_L4TYPE_MASK:
+		break;
+	default:
+		DEBUGOUT(" Error on flow type mask\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
+	case 0x0000:
+		/* mask VLAN ID, fall through to mask VLAN priority */
+		fdirm |= IXGBE_FDIRM_VLANID;
+	case 0x0FFF:
+		/* mask VLAN priority */
+		fdirm |= IXGBE_FDIRM_VLANP;
+		break;
+	case 0xE000:
+		/* mask VLAN ID only, fall through */
+		fdirm |= IXGBE_FDIRM_VLANID;
+	case 0xEFFF:
+		/* no VLAN fields masked */
+		break;
+	default:
+		DEBUGOUT(" Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+	case 0x0000:
+		/* Mask Flex Bytes, fall through */
+		fdirm |= IXGBE_FDIRM_FLEX;
+	case 0xFFFF:
+		break;
+	default:
+		DEBUGOUT(" Error on flexible byte mask\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	if (cloud_mode) {
+		fdirm |= IXGBE_FDIRM_L3P;
+		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+
+		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
+		case 0x00:
+			/* Mask inner MAC, fall through */
+			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
+		case 0xFF:
+			break;
+		default:
+			DEBUGOUT(" Error on inner_mac byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+
+		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
+		case 0x0:
+			/* Mask vxlan id */
+			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
+			break;
+		case 0x00FFFFFF:
+			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+			break;
+		case 0xFFFFFFFF:
+			break;
+		default:
+			DEBUGOUT(" Error on TNI/VNI byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+
+		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
+		case 0x0:
+			/* Mask tunnel type, fall through */
+			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+		case 0xFFFF:
+			break;
+		default:
+			DEBUGOUT(" Error on tunnel type byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
+
+		/* Set all bits in FDIRSIP4M and FDIRDIP4M in cloud mode */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+	}
+
+	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+	if (!cloud_mode) {
+		/* store the TCP/UDP port masks, bit reversed from port
+		 * layout */
+		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+		/* write both the same so that UDP and TCP use the same mask */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+		/* also use it for SCTP */
+		switch (hw->mac.type) {
+		case ixgbe_mac_X550:
+		case ixgbe_mac_X550EM_x:
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+			break;
+		default:
+			break;
+		}
+
+		/* store source and destination IP masks (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+				     ~input_mask->formatted.src_ip[0]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+				     ~input_mask->formatted.dst_ip[0]);
+	}
+	return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id, u8 queue, bool cloud_mode)
+{
+	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+	u32 addr_low, addr_high;
+	u32 cloud_type = 0;
+	s32 err;
+
+	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+	if (!cloud_mode) {
+		/* currently IPv6 is not supported, must be programmed with 0 */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+				     input->formatted.src_ip[0]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+				     input->formatted.src_ip[1]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+				     input->formatted.src_ip[2]);
+
+		/* record the source address (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
+			input->formatted.src_ip[0]);
+
+		/* record the first 32 bits of the destination address
+		 * (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
+			input->formatted.dst_ip[0]);
+
+		/* record source and destination port (little-endian) */
+		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+	}
+
+	/* record VLAN (little-endian) and flex_bytes (big-endian) */
+	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	if (cloud_mode) {
+		if (input->formatted.tunnel_type != 0)
+			cloud_type = 0x80000000;
+
+		addr_low = ((u32)input->formatted.inner_mac[0] |
+				((u32)input->formatted.inner_mac[1] << 8) |
+				((u32)input->formatted.inner_mac[2] << 16) |
+				((u32)input->formatted.inner_mac[3] << 24));
+		addr_high = ((u32)input->formatted.inner_mac[4] |
+				((u32)input->formatted.inner_mac[5] << 8));
+		cloud_type |= addr_high;
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
+	}
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash;
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/*
+	 * flush all previous writes to make certain registers are
+	 * programmed prior to issuing the command
+	 */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	if (queue == IXGBE_FDIR_DROP_QUEUE)
+		fdircmd |= IXGBE_FDIRCMD_DROP;
+	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
+		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		DEBUGOUT("Flow Director command did not complete!\n");
+		return err;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id)
+{
+	u32 fdirhash;
+	u32 fdircmd;
+	s32 err;
+
+	/* configure FDIRHASH register */
+	fdirhash = input->formatted.bkt_hash;
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/* flush hash to HW */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Query if filter is present */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		DEBUGOUT("Flow Director command did not complete!\n");
+		return err;
+	}
+
+	/* if filter exists in hardware then remove it */
+	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+		IXGBE_WRITE_FLUSH(hw);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream
+ *  @input_mask: mask for the input bitstream
+ *  @soft_id: software index for the filters
+ *  @queue: queue index to direct traffic to
+ *
+ *  Note that the caller of this function must lock before calling, since the
+ *  hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+					union ixgbe_atr_input *input,
+					union ixgbe_atr_input *input_mask,
+					u16 soft_id, u8 queue, bool cloud_mode)
+{
+	s32 err = IXGBE_ERR_CONFIG;
+
+	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
+
+	/*
+	 * Check flow_type formatting, and bail out before we touch the hardware
+	 * if there's a configuration issue
+	 */
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
+		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+		if (input->formatted.dst_port || input->formatted.src_port) {
+			DEBUGOUT(" Error on src/dst port\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		break;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
+		if (input->formatted.dst_port || input->formatted.src_port) {
+			DEBUGOUT(" Error on src/dst port\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* fall through */
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
+		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+						  IXGBE_ATR_L4TYPE_MASK;
+		break;
+	default:
+		DEBUGOUT(" Error on flow type input\n");
+		return err;
+	}
+
+	/* program input mask into the HW */
+	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
+	if (err)
+		return err;
+
+	/* apply mask and compute/store hash */
+	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+	/* program filters to filter memory */
+	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+						     soft_id, queue, cloud_mode);
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Omer analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32  core_ctl;
+
+	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
+
+	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+			(reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+	*val = (u8)core_ctl;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Omer analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32  core_ctl;
+
+	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
+
+	core_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function
+ *  and the generation-2 start_hw function.
+ *  Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_start_hw_82599");
+
+	ret_val = ixgbe_start_hw_generic(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	ret_val = ixgbe_start_hw_gen2(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	/* We need to run link autotry after the driver loads */
+	hw->mac.autotry_restart = true;
+
+	if (ret_val == IXGBE_SUCCESS)
+		ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_identify_phy_82599 - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ *  If PHY already detected, maintains current PHY type in hw struct,
+ *  otherwise executes the PHY detection routine.
+ **/
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_identify_phy_82599");
+
+	/* Detect PHY if unknown - returns success if already detected. */
+	status = ixgbe_identify_phy_generic(hw);
+	if (status != IXGBE_SUCCESS) {
+		/* 82599 10GBASE-T requires an external PHY */
+		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+			return status;
+		else
+			status = ixgbe_identify_module_generic(hw);
+	}
+
+	/* Set PHY type none if no PHY detected */
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		hw->phy.type = ixgbe_phy_none;
+		return IXGBE_SUCCESS;
+	}
+
+	/* Return error if SFP module has been detected but is not supported */
+	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+	u16 ext_ability = 0;
+
+	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
+
+	hw->phy.ops.identify(hw);
+
+	switch (hw->phy.type) {
+	case ixgbe_phy_tn:
+	case ixgbe_phy_cu_unknown:
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+		goto out;
+	default:
+		break;
+	}
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_AN:
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+			goto out;
+		} else
+			/* SFI mode so read SFP module */
+			goto sfp_check;
+		break;
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+		goto out;
+		break;
+	case IXGBE_AUTOC_LMS_10G_SERIAL:
+		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+			goto out;
+		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+			goto sfp_check;
+		break;
+	case IXGBE_AUTOC_LMS_KX4_KX_KR:
+	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+		goto out;
+		break;
+	default:
+		goto out;
+		break;
+	}
+
+sfp_check:
+	/* SFP check must be done last since DA modules are sometimes used to
+	 * test KR mode - we need to identify KR mode correctly before the SFP
+	 * module check.  Call identify_sfp because the pluggable module may
+	 * have changed */
+	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+out:
+	return physical_layer;
+}
+
+/**
+ *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit for 82599
+ **/
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+
+	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
+
+	/*
+	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
+	 * If traffic is incoming before we enable the Rx unit, it could hang
+	 * the Rx DMA unit.  Therefore, make sure the security engine is
+	 * completely disabled prior to enabling the Rx unit.
+	 */
+
+	hw->mac.ops.disable_sec_rx_path(hw);
+
+	if (regval & IXGBE_RXCTRL_RXEN)
+		ixgbe_enable_rx(hw);
+	else
+		ixgbe_disable_rx(hw);
+
+	hw->mac.ops.enable_sec_rx_path(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
+ *  @hw: pointer to hardware structure
+ *
+ *  Verifies that the installed firmware version is 0.6 or higher
+ *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ *  if the FW version is not supported.
+ **/
+STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM_VERSION;
+	u16 fw_offset, fw_ptp_cfg_offset;
+	u16 fw_version;
+
+	DEBUGFUNC("ixgbe_verify_fw_version_82599");
+
+	/* firmware check is only necessary for SFI devices */
+	if (hw->phy.media_type != ixgbe_media_type_fiber) {
+		status = IXGBE_SUCCESS;
+		goto fw_version_out;
+	}
+
+	/* get the offset to the Firmware Module block */
+	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
+
+	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the offset to the Pass Through Patch Configuration block */
+	if (hw->eeprom.ops.read(hw, (fw_offset +
+				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+				 &fw_ptp_cfg_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      fw_offset +
+			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
+
+	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the firmware version */
+	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
+
+	if (fw_version > 0x5)
+		status = IXGBE_SUCCESS;
+
+fw_version_out:
+	return status;
+}
+
+/**
+ *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns true if the LESM FW module is present and enabled. Otherwise
+ *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+	bool lesm_enabled = false;
+	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+	s32 status;
+
+	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
+
+	/* get the offset to the Firmware Module block */
+	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+	if ((status != IXGBE_SUCCESS) ||
+	    (fw_offset == 0) || (fw_offset == 0xFFFF))
+		goto out;
+
+	/* get the offset to the LESM Parameters block */
+	status = hw->eeprom.ops.read(hw, (fw_offset +
+				     IXGBE_FW_LESM_PARAMETERS_PTR),
+				     &fw_lesm_param_offset);
+
+	if ((status != IXGBE_SUCCESS) ||
+	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+		goto out;
+
+	/* get the LESM state word */
+	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+				     IXGBE_FW_LESM_STATE_1),
+				     &fw_lesm_state);
+
+	if ((status == IXGBE_SUCCESS) &&
+	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+		lesm_enabled = true;
+
+out:
+	return lesm_enabled;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ *  fastest available method
+ *
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Retrieves 16 bit word(s) read from EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+					  u16 words, u16 *data)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	s32 ret_val = IXGBE_ERR_CONFIG;
+
+	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
+
+	/*
+	 * If EEPROM is detected and can be addressed using 14 bits,
+	 * use EERD otherwise use bit bang
+	 */
+	if ((eeprom->type == ixgbe_eeprom_spi) &&
+	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+							 data);
+	else
+		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+								    words,
+								    data);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_read_eeprom_82599 - Read EEPROM word using
+ *  fastest available method
+ *
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM
+ **/
+STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+				   u16 offset, u16 *data)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	s32 ret_val = IXGBE_ERR_CONFIG;
+
+	DEBUGFUNC("ixgbe_read_eeprom_82599");
+
+	/*
+	 * If EEPROM is detected and can be addressed using 14 bits,
+	 * use EERD otherwise use bit bang
+	 */
+	if ((eeprom->type == ixgbe_eeprom_spi) &&
+	    (offset <= IXGBE_EERD_MAX_ADDR))
+		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+	else
+		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ *  @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset.  This function assumes the SW/FW lock is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val;
+	u32 anlp1_reg = 0;
+	u32 i, autoc_reg, autoc2_reg;
+
+	/* Enable link if disabled in NVM */
+	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
+	/* Wait for AN to leave state 0 */
+	for (i = 0; i < 10; i++) {
+		msec_delay(4);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+			break;
+	}
+
+	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+		DEBUGOUT("auto negotiation not completed\n");
+		ret_val = IXGBE_ERR_RESET_FAILED;
+		goto reset_pipeline_out;
+	}
+
+	ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+	/* Write AUTOC register with original LMS field and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
+
+/**
+ *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address to read from
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 dev_addr, u8 *data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			msec_delay(5);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("Driver can't access resource,"
+				 " acquiring I2C bus timeout.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address to write to
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			msec_delay(5);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("Driver can't access resource,"
+				 " acquiring I2C bus timeout.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h
new file mode 100644
index 0000000..39316a9
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -0,0 +1,65 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_82599_H_
+#define _IXGBE_82599_H_
+
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+				      ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+					  ixgbe_link_speed speed,
+					  bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+				    ixgbe_link_speed speed,
+				    bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+			       bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			       bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+#endif /* _IXGBE_82599_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
new file mode 100644
index 0000000..c704b69
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -0,0 +1,1477 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+/**
+ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+	if (hw->mac.ops.get_rtrup2tc)
+		hw->mac.ops.get_rtrup2tc(hw, map);
+}
+
+/**
+ *  ixgbe_init_shared_code - Initialize the shared code
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign function pointers and assign the MAC type and PHY code.
+ *  Does not touch the hardware. This function must be called prior to any
+ *  other function in the shared code. The ixgbe_hw structure should be
+ *  memset to 0 prior to calling this function.  The following fields in
+ *  hw structure should be filled in prior to calling this function:
+ *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ *  subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_init_shared_code");
+
+	/*
+	 * Set the mac type
+	 */
+	ixgbe_set_mac_type(hw);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		status = ixgbe_init_ops_82598(hw);
+		break;
+	case ixgbe_mac_82599EB:
+		status = ixgbe_init_ops_82599(hw);
+		break;
+	case ixgbe_mac_X540:
+		status = ixgbe_init_ops_X540(hw);
+		break;
+	case ixgbe_mac_X550:
+		status = ixgbe_init_ops_X550(hw);
+		break;
+	case ixgbe_mac_X550EM_x:
+		status = ixgbe_init_ops_X550EM(hw);
+		break;
+	case ixgbe_mac_82599_vf:
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_X550_vf:
+	case ixgbe_mac_X550EM_x_vf:
+		status = ixgbe_init_ops_vf(hw);
+		break;
+	default:
+		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  vendor ID and device ID stored in the hw structure.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_set_mac_type\n");
+
+	if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+			     "Unsupported vendor id: %x", hw->vendor_id);
+		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598:
+	case IXGBE_DEV_ID_82598_BX:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AT:
+	case IXGBE_DEV_ID_82598AT2:
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+	case IXGBE_DEV_ID_82598EB_SFP_LOM:
+		hw->mac.type = ixgbe_mac_82598EB;
+		break;
+	case IXGBE_DEV_ID_82599_KX4:
+	case IXGBE_DEV_ID_82599_KX4_MEZZ:
+	case IXGBE_DEV_ID_82599_XAUI_LOM:
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+	case IXGBE_DEV_ID_82599_KR:
+	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+	case IXGBE_DEV_ID_82599_SFP_FCOE:
+	case IXGBE_DEV_ID_82599_SFP_EM:
+	case IXGBE_DEV_ID_82599_SFP_SF2:
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
+	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+	case IXGBE_DEV_ID_82599EN_SFP:
+	case IXGBE_DEV_ID_82599_CX4:
+	case IXGBE_DEV_ID_82599_LS:
+	case IXGBE_DEV_ID_82599_T3_LOM:
+		hw->mac.type = ixgbe_mac_82599EB;
+		break;
+	case IXGBE_DEV_ID_82599_VF:
+	case IXGBE_DEV_ID_82599_VF_HV:
+		hw->mac.type = ixgbe_mac_82599_vf;
+		break;
+	case IXGBE_DEV_ID_X540_VF:
+	case IXGBE_DEV_ID_X540_VF_HV:
+		hw->mac.type = ixgbe_mac_X540_vf;
+		break;
+	case IXGBE_DEV_ID_X540T:
+	case IXGBE_DEV_ID_X540T1:
+		hw->mac.type = ixgbe_mac_X540;
+		break;
+	case IXGBE_DEV_ID_X550T:
+		hw->mac.type = ixgbe_mac_X550;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_KX4:
+	case IXGBE_DEV_ID_X550EM_X_KR:
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
+	case IXGBE_DEV_ID_X550EM_X_1G_T:
+	case IXGBE_DEV_ID_X550EM_X_SFP:
+		hw->mac.type = ixgbe_mac_X550EM_x;
+		break;
+	case IXGBE_DEV_ID_X550_VF:
+	case IXGBE_DEV_ID_X550_VF_HV:
+		hw->mac.type = ixgbe_mac_X550_vf;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_VF:
+	case IXGBE_DEV_ID_X550EM_X_VF_HV:
+		hw->mac.type = ixgbe_mac_X550EM_x_vf;
+		break;
+	default:
+		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+			     "Unsupported device id: %x",
+			     hw->device_id);
+		break;
+	}
+
+	DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
+		  hw->mac.type, ret_val);
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_hw - Initialize the hardware
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_reset_hw - Performs a hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_start_hw - Prepares hardware for Rx/Tx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type,
+ *  clears all on chip counters, initializes receive address registers,
+ *  multicast table, VLAN filter table, calls routine to setup link and
+ *  flow control settings, and leaves transmit and receive units disabled
+ *  and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering - Enables Tx relaxed ordering,
+ *  which is disabled by default in ixgbe_start_hw()
+ *
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable relaxed ordering.
+ **/
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.enable_relaxed_ordering)
+		hw->mac.ops.enable_relaxed_ordering(hw);
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs - Clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_media_type - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+			       ixgbe_media_type_unknown);
+}
+
+/**
+ *  ixgbe_get_mac_addr - Get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from the first Receive Address Register
+ *  (RAR0).  A reset of the adapter must have been performed prior to calling
+ *  this function in order for the MAC address to have been loaded from the
+ *  EEPROM into RAR0.
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+			       (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_san_mac_addr - Get SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+			       (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_san_mac_addr - Write a SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Writes a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+			       (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_device_caps - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word for device capabilities
+ *
+ *  Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+			       (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check for support of the alternative WWNN/WWPN prefix.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+			 u16 *wwpn_prefix)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+			       (hw, wwnn_prefix, wwpn_prefix),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_fcoe_boot_status -  Get FCOE boot status from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
+ *
+ *  This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+			       (hw, bs),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_bus_info - Set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_of_tx_queues - Get Tx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+	return hw->mac.max_tx_queues;
+}
+
+/**
+ *  ixgbe_get_num_of_rx_queues - Get Rx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+	return hw->mac.max_rx_queues;
+}
+
+/**
+ *  ixgbe_stop_adapter - Disable Rx/Tx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+	return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ *  ixgbe_read_pba_num - Reads part number from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number from the EEPROM
+ *
+ *  Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+	return ixgbe_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ *  ixgbe_identify_phy - Get PHY type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+					 IXGBE_NOT_IMPLEMENTED);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_phy - Perform a PHY reset
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
+			status = IXGBE_ERR_PHY;
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+					 IXGBE_NOT_IMPLEMENTED);
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version - Get PHY firmware version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to firmware version
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+				 (hw, firmware_version),
+				 IXGBE_NOT_IMPLEMENTED);
+	return status;
+}
+
+/**
+ *  ixgbe_read_phy_reg - Read PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: type of device to access
+ *  @phy_data: Pointer to read data from PHY register
+ *
+ *  Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+		       u16 *phy_data)
+{
+	if (hw->phy.id == 0)
+		ixgbe_identify_phy(hw);
+
+	return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_phy_reg - Write PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: type of device to access
+ *  @phy_data: Data to write to the PHY register
+ *
+ *  Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			u16 phy_data)
+{
+	if (hw->phy.id == 0)
+		ixgbe_identify_phy(hw);
+
+	return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link - Restart PHY autoneg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restarts PHY autonegotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_internal_phy - Configure integrated PHY
+ * @hw: pointer to hardware structure
+ *
+ * Reconfigure the integrated PHY in order to enable communication with the
+ * external PHY.
+ * Returns success if not implemented, since nothing needs to be done in this
+ * case.
+ */
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
+			       IXGBE_SUCCESS);
+}
+
+/**
+ *  ixgbe_check_phy_link - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads a PHY register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			 bool *link_up)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+			       link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link_speed - Set auto advertise
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *
+ *  Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			       bool autoneg_wait_to_complete)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+			       autoneg_wait_to_complete),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_phy_power - Control the phy power state
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_check_link - Get link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the LINKS register to determine if link is up and the current speed.
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+		     bool *link_up, bool link_up_wait_to_complete)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+			       link_up, link_up_wait_to_complete),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_tx_laser - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables the Tx laser on SFI optics, if the underlying MAC supports it.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.disable_tx_laser)
+		hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_enable_tx_laser - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the Tx laser on SFI optics, if the underlying MAC supports it.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.enable_tx_laser)
+		hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support then
+ *  flap the tx laser to alert the link partner to start autotry
+ *  process on its end.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.flap_tx_laser)
+		hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_setup_link - Set link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *
+ *  Configures link settings.  Restarts the link.
+ *  Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+		     bool autoneg_wait_to_complete)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+			       autoneg_wait_to_complete),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_link_capabilities - Returns link capabilities
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+				bool *autoneg)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+			       speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_on - Turn on LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ *
+ *  Turns on the software controllable LEDs.
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_off - Turn off LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ *
+ *  Turns off the software controllable LEDs.
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_start - Blink LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ *
+ *  Blink LED based on index.
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_stop - Stop blinking LEDs
+ *  @hw: pointer to hardware structure
+ *
+ *  Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ *  ixgbe_write_eeprom - Write word to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word to be written to the EEPROM
+ *
+ *  Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *  @words: number of words
+ *
+ *  Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+			      u16 *data)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+			       (hw, offset, words, data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom - Read word from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit value from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit word(s) from EEPROM
+ *  @words: number of words
+ *
+ *  Reads 16 bit word(s) from EEPROM
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+			     u16 words, u16 *data)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+			       (hw, offset, words, data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+			       (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_insert_mac_addr - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the RAR that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+			       (hw, addr, vmdq),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_rar - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set"
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		  u32 enable_addr)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+			       enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_rar - Clear Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to associate with VMDq index
+ *  @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+			       IXGBE_NOT_IMPLEMENTED);
+
+}
+
+/**
+ *  ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
+ *  @hw: pointer to hardware structure
+ *  @vmdq: VMDq default pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
+			       (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to disassociate with VMDq index
+ *  @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_rx_addrs - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ *  @hw: pointer to hardware structure
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+	return hw->mac.num_rar_entries;
+}
+
+/**
+ *  ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new secondary (unicast) addresses
+ *  @addr_count: number of addresses
+ *  @func: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list. Clears the secondary addrs from
+ *  receive address registers. Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+			      u32 addr_count, ixgbe_mc_addr_itr func)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+			       addr_list, addr_count, func),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @func: iterator function to walk the multicast address list
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, ixgbe_mc_addr_itr func,
+			      bool clear)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+			       mc_addr_list, mc_addr_count, func, clear),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_mc - Enable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_mc - Disable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vfta - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+			       vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vlvf - Set VLAN Pool Filter
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ *                 should be changed
+ *
+ *  Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+		    bool *vfta_changed)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
+			       vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_fc_enable - Enable flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the flow control settings based on SW configuration.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+			 u8 ver)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+			       build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ *  ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
+				IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Inits the thermal sensor thresholds according to the NVM map
+ **/
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
+				IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_dmac_config - Configure DMA Coalescing registers.
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures DMA coalescing. When enabling, DMAC is activated.
+ *  When disabling, the DMAC enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+				IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables dmac, updates per TC settings, and then enable dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+				IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ *  @hw: pointer to hardware structure
+ *
+ *  Configure DMA coalescing threshold per TC and set high priority bit for
+ *  FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+				IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_eee - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @enable_eee: boolean flag to enable EEE
+ *
+ *  Enable/disable EEE based on the enable_eee flag.
+ *  Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ *  are modified.
+ *
+ **/
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
+			IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to toggle source address pruning
+ **/
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+				      unsigned int pool)
+{
+	if (hw->mac.ops.set_source_address_pruning)
+		hw->mac.ops.set_source_address_pruning(hw, enable, pool);
+}
+
+/**
+ *  ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for Ethertype anti-spoofing
+ *  @vf: VF pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+	if (hw->mac.ops.set_ethertype_anti_spoofing)
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
+}
+
+/**
+ *  ixgbe_read_iosf_sb_reg - Read 32 bit register through IOSF Sideband
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: type of device you want to communicate with
+ *  @phy_data: Pointer to read data from PHY register
+ *
+ *  Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			   u32 device_type, u32 *phy_data)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: type of device you want to communicate with
+ *  @phy_data: Data to write to the PHY register
+ *
+ *  Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			    u32 device_type, u32 phy_data)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_mdd - Disable malicious driver detection
+ *  @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_disable_mdd(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.disable_mdd)
+		hw->mac.ops.disable_mdd(hw);
+}
+
+/**
+ *  ixgbe_enable_mdd - Enable malicious driver detection
+ *  @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_mdd(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.enable_mdd)
+		hw->mac.ops.enable_mdd(hw);
+}
+
+/**
+ *  ixgbe_mdd_event - Handle malicious driver detection event
+ *  @hw: pointer to hardware structure
+ *  @vf_bitmap: vf bitmap of malicious vfs
+ *
+ **/
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+	if (hw->mac.ops.mdd_event)
+		hw->mac.ops.mdd_event(hw, vf_bitmap);
+}
+
+/**
+ *  ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
+ *  detection event
+ *  @hw: pointer to hardware structure
+ *  @vf: vf index
+ *
+ **/
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
+{
+	if (hw->mac.ops.restore_mdd_vf)
+		hw->mac.ops.restore_mdd_vf(hw, vf);
+}
+
+/**
+ *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation on the specified analog register.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+			       val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+			       val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the Unicast Table Arrays to zero on device load.  This
+ *  is part of the Rx init addr execution path.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: I2C bus address to read from
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			u8 *data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+			       dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_combined - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
+			       reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: I2C bus address to write to
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface
+ *  at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			 u8 data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+			       dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_i2c_combined - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr,
+			       reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to write
+ *  @eeprom_data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+			   u8 byte_offset, u8 eeprom_data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+			       (hw, byte_offset, eeprom_data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+			      (hw, byte_offset, eeprom_data),
+			      IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+			       (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ *  ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ *  @hw: pointer to hardware structure
+ *  @regval: bitfield to write to the Rx DMA register
+ *
+ *  Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+			       (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_sec_rx_path - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path.
+ **/
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
+				(hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_sec_rx_path - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
+				(hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+			       (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
+{
+	if (hw->mac.ops.release_swfw_sync)
+		hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+
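+/**
+ *  ixgbe_disable_rx - Disable Rx unit
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables the receive unit, if the underlying MAC implements the operation.
+ **/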
+void ixgbe_disable_rx(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.disable_rx)
+		hw->mac.ops.disable_rx(hw);
+}
+
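+/**
+ *  ixgbe_enable_rx - Enable Rx unit
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive unit, if the underlying MAC implements the operation.
+ **/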
+void ixgbe_enable_rx(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.enable_rx)
+		hw->mac.ops.enable_rx(hw);
+}
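Every wrapper in ixgbe_api.c above follows the same pattern: look up a function pointer in the per-device ops table and fall back to a default return value when the pointer is unset. A minimal sketch of that dispatch, assuming a NULL-checked macro of this shape (the real ixgbe_call_func definition lives in the base-driver headers and may differ in detail):

/* Sketch only: NULL-checked dispatch through the ops table. */
#define ixgbe_call_func(hw, func, params, error) \
	((func) != NULL ? (func) params : (error))

/* With that definition, ixgbe_led_on() above expands roughly to: */
s32 example_led_on(struct ixgbe_hw *hw, u32 index)
{
	if (hw->mac.ops.led_on != NULL)
		return hw->mac.ops.led_on(hw, index);
	return IXGBE_NOT_IMPLEMENTED;
}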
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
new file mode 100644
index 0000000..8386e29
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -0,0 +1,206 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+		       u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+			 ixgbe_link_speed *speed,
+			 bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+			       ixgbe_link_speed speed,
+			       bool autoneg_wait_to_complete);
+s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+		     bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+		     bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+				bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+			      u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+			     u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		  u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+			      u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, ixgbe_mc_addr_itr func,
+			      bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+		   u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+		   bool vlan_on, bool *vfta_changed);
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+			 u8 ver);
+s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+				   u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
+					bool cloud_mode);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_hash_dword input,
+					  union ixgbe_atr_hash_dword common,
+					  u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+				    union ixgbe_atr_input *input_mask, bool cloud_mode);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id, u8 queue, bool cloud_mode);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+					  union ixgbe_atr_input *input,
+					  u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+					union ixgbe_atr_input *input,
+					union ixgbe_atr_input *mask,
+					u16 soft_id,
+					u8 queue,
+					bool cloud_mode);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+					  union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+				     union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			u8 *data);
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			 u8 data);
+s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+			 u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+				      unsigned int vf);
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
+				       int vf);
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			u32 device_type, u32 *phy_data);
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			u32 device_type, u32 phy_data);
+void ixgbe_disable_mdd(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd(struct ixgbe_hw *hw);
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_disable_rx(struct ixgbe_hw *hw);
+void ixgbe_enable_rx(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_API_H_ */
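The header above is the entire surface the PMD uses to drive the base driver. As a rough, hypothetical sketch of the usual call order (the wrapper function name is invented for illustration; error handling is trimmed):

/* Hypothetical bring-up sequence using the generic API declared above. */
static s32 example_hw_bring_up(struct ixgbe_hw *hw)
{
	s32 err;

	err = ixgbe_init_shared_code(hw);	/* select the per-MAC ops tables */
	if (err != IXGBE_SUCCESS)
		return err;

	err = ixgbe_init_hw(hw);		/* reset_hw() followed by start_hw() */
	if (err != IXGBE_SUCCESS)
		return err;

	return ixgbe_init_rx_addrs(hw);		/* program RAR 0, clear the rest */
}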
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
new file mode 100644
index 0000000..d801de3
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -0,0 +1,4940 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_api.h"
+
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+					u16 count);
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+					 u16 *san_mac_offset);
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+					     u16 words, u16 *data);
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+					      u16 words, u16 *data);
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+						 u16 offset);
+
+/**
+ *  ixgbe_init_ops_generic - Inits function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	DEBUGFUNC("ixgbe_init_ops_generic");
+
+	/* EEPROM */
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
+	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+	if (eec & IXGBE_EEC_PRES) {
+		eeprom->ops.read = ixgbe_read_eerd_generic;
+		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
+	} else {
+		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
+		eeprom->ops.read_buffer =
+				 ixgbe_read_eeprom_buffer_bit_bang_generic;
+	}
+	eeprom->ops.write = ixgbe_write_eeprom_generic;
+	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
+	eeprom->ops.validate_checksum =
+				      ixgbe_validate_eeprom_checksum_generic;
+	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
+
+	/* MAC */
+	mac->ops.init_hw = ixgbe_init_hw_generic;
+	mac->ops.reset_hw = NULL;
+	mac->ops.start_hw = ixgbe_start_hw_generic;
+	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
+	mac->ops.get_media_type = NULL;
+	mac->ops.get_supported_physical_layer = NULL;
+	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
+	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
+	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
+	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
+	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
+	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
+	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
+	mac->ops.prot_autoc_read = prot_autoc_read_generic;
+	mac->ops.prot_autoc_write = prot_autoc_write_generic;
+
+	/* LEDs */
+	mac->ops.led_on = ixgbe_led_on_generic;
+	mac->ops.led_off = ixgbe_led_off_generic;
+	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
+	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
+
+	/* RAR, Multicast, VLAN */
+	mac->ops.set_rar = ixgbe_set_rar_generic;
+	mac->ops.clear_rar = ixgbe_clear_rar_generic;
+	mac->ops.insert_mac_addr = NULL;
+	mac->ops.set_vmdq = NULL;
+	mac->ops.clear_vmdq = NULL;
+	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
+	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
+	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
+	mac->ops.enable_mc = ixgbe_enable_mc_generic;
+	mac->ops.disable_mc = ixgbe_disable_mc_generic;
+	mac->ops.clear_vfta = NULL;
+	mac->ops.set_vfta = NULL;
+	mac->ops.set_vlvf = NULL;
+	mac->ops.init_uta_tables = NULL;
+	mac->ops.enable_rx = ixgbe_enable_rx_generic;
+	mac->ops.disable_rx = ixgbe_disable_rx_generic;
+
+	/* Flow Control */
+	mac->ops.fc_enable = ixgbe_fc_enable_generic;
+
+	/* Link */
+	mac->ops.get_link_capabilities = NULL;
+	mac->ops.setup_link = NULL;
+	mac->ops.check_link = NULL;
+	mac->ops.dmac_config = NULL;
+	mac->ops.dmac_update_tcs = NULL;
+	mac->ops.dmac_config_tcs = NULL;
+
+	return IXGBE_SUCCESS;
+}
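/*
 * For orientation: the per-MAC init_ops routines declared in ixgbe_api.h
 * build on this generic table.  They call ixgbe_init_ops_generic() first and
 * then override the pointers that are device specific, roughly along these
 * lines (a simplified sketch, not the full ixgbe_init_ops_82599() body):
 */
static s32 example_init_ops_sketch(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret = ixgbe_init_ops_generic(hw);	/* generic defaults first */

	/* then hook up the device-specific implementations */
	mac->ops.reset_hw = ixgbe_reset_hw_82599;
	mac->ops.get_media_type = ixgbe_get_media_type_82599;
	mac->ops.setup_link = ixgbe_setup_mac_link_82599;

	return ret;
}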
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
+ *
+ * This function returns true if the device supports flow control
+ * autonegotiation, and false if it does not.
+ *
+ **/
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+	bool supported = false;
+	ixgbe_link_speed speed;
+	bool link_up;
+
+	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_qsfp:
+	case ixgbe_media_type_fiber:
+		hw->mac.ops.check_link(hw, &speed, &link_up, false);
+		/* if link is down, assume supported */
+		if (link_up)
+			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+				true : false;
+		else
+			supported = true;
+		break;
+	case ixgbe_media_type_backplane:
+		supported = true;
+		break;
+	case ixgbe_media_type_copper:
+		/* only some copper devices support flow control autoneg */
+		switch (hw->device_id) {
+		case IXGBE_DEV_ID_82599_T3_LOM:
+		case IXGBE_DEV_ID_X540T:
+		case IXGBE_DEV_ID_X540T1:
+		case IXGBE_DEV_ID_X550T:
+			supported = true;
+			break;
+		default:
+			supported = false;
+		}
+	default:
+		break;
+	}
+
+	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+		      "Device %x does not support flow control autoneg",
+		      hw->device_id);
+	return supported;
+}
+
+/**
+ *  ixgbe_setup_fc - Set up flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Called at init time to set up flow control.
+ **/
+STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 reg = 0, reg_bp = 0;
+	u16 reg_cu = 0;
+	bool locked = false;
+
+	DEBUGFUNC("ixgbe_setup_fc");
+
+	/* Validate the requested mode */
+	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/*
+	 * 10gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.requested_mode == ixgbe_fc_default)
+		hw->fc.requested_mode = ixgbe_fc_full;
+
+	/*
+	 * Set up the 1G and 10G flow control advertisement registers so the
+	 * HW will be able to do fc autoneg once the cable is plugged in.  If
+	 * we link at 10G, the 1G advertisement is harmless and vice versa.
+	 */
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_backplane:
+		/* some MAC's need RMW protection on AUTOC */
+		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+		if (ret_val != IXGBE_SUCCESS)
+			goto out;
+
+		/* only backplane uses autoc so fall through */
+	case ixgbe_media_type_fiber_qsfp:
+	case ixgbe_media_type_fiber:
+		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+		break;
+	case ixgbe_media_type_copper:
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+				    IXGBE_AUTOC_ASM_PAUSE);
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+		if (hw->phy.media_type == ixgbe_media_type_backplane) {
+			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
+			reg_cu |= IXGBE_TAF_ASM_PAUSE;
+			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+		}
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE, as such we fall
+		 * through to the fc_full statement.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+				  IXGBE_AUTOC_ASM_PAUSE;
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+		break;
+	default:
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+			     "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	if (hw->mac.type < ixgbe_mac_X540) {
+		/*
+		 * Enable auto-negotiation between the MAC & PHY;
+		 * the MAC will advertise clause 37 flow control.
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+		/* Disable AN timeout */
+		if (hw->fc.strict_ieee)
+			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+	}
+
+	/*
+	 * AUTOC restart handles negotiation of 1G and 10G on backplane
+	 * and copper. There is no need to set the PCS1GCTL register.
+	 *
+	 */
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
+		reg_bp |= IXGBE_AUTOC_AN_RESTART;
+		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+		if (ret_val)
+			goto out;
+	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+		    (ixgbe_device_supports_autoneg_fc(hw))) {
+		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+	}
+
+	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+out:
+	return ret_val;
+}
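/*
 * ixgbe_setup_fc() consumes hw->fc.requested_mode, so the caller is expected
 * to have picked a mode before start_hw runs.  A minimal, hypothetical
 * example of requesting symmetric flow control (the helper name is invented;
 * the fields are from struct ixgbe_fc_info):
 */
static void example_request_full_fc(struct ixgbe_hw *hw)
{
	hw->fc.requested_mode = ixgbe_fc_full;	/* advertise Rx and Tx pause */
	hw->fc.disable_fc_autoneg = false;	/* let autoneg resolve the mode */
}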
+
+/**
+ *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type,
+ *  clearing all on-chip counters, initializing receive address registers,
+ *  the multicast table and the VLAN filter table, calling the routine to set
+ *  up link and flow control settings, and leaving transmit and receive units
+ *  disabled and uninitialized.
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_ext;
+
+	DEBUGFUNC("ixgbe_start_hw_generic");
+
+	/* Set the media type */
+	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+	/* PHY ops initialization must be done in reset_hw() */
+
+	/* Clear the VLAN filter table */
+	hw->mac.ops.clear_vfta(hw);
+
+	/* Clear statistics registers */
+	hw->mac.ops.clear_hw_cntrs(hw);
+
+	/* Set No Snoop Disable */
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Setup flow control */
+	ret_val = ixgbe_setup_fc(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_start_hw_gen2 - Init sequence for common device family
+ *  @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ *     82599
+ *     X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 regval;
+
+	/* Clear the rate limiters */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+	}
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Disable relaxed ordering */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+	}
+
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_hw_generic - Generic hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the hardware by resetting it, filling the bus info structure
+ *  and media type, clearing all on-chip counters, initializing receive
+ *  address registers, the multicast table and the VLAN filter table, calling
+ *  the routine to set up link and flow control settings, and leaving transmit
+ *  and receive units disabled and uninitialized.
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_init_hw_generic");
+
+	/* Reset the hardware */
+	status = hw->mac.ops.reset_hw(hw);
+
+	if (status == IXGBE_SUCCESS) {
+		/* Start the HW */
+		status = hw->mac.ops.start_hw(hw);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
+
+	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+	IXGBE_READ_REG(hw, IXGBE_ERRBC);
+	IXGBE_READ_REG(hw, IXGBE_MSPDC);
+	for (i = 0; i < 8; i++)
+		IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+	IXGBE_READ_REG(hw, IXGBE_MLFC);
+	IXGBE_READ_REG(hw, IXGBE_MRFC);
+	IXGBE_READ_REG(hw, IXGBE_RLEC);
+	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	if (hw->mac.type >= ixgbe_mac_82599EB) {
+		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+	} else {
+		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+	}
+
+	for (i = 0; i < 8; i++) {
+		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+		if (hw->mac.type >= ixgbe_mac_82599EB) {
+			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+		} else {
+			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+		}
+	}
+	if (hw->mac.type >= ixgbe_mac_82599EB)
+		for (i = 0; i < 8; i++)
+			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+	IXGBE_READ_REG(hw, IXGBE_PRC64);
+	IXGBE_READ_REG(hw, IXGBE_PRC127);
+	IXGBE_READ_REG(hw, IXGBE_PRC255);
+	IXGBE_READ_REG(hw, IXGBE_PRC511);
+	IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	IXGBE_READ_REG(hw, IXGBE_PRC1522);
+	IXGBE_READ_REG(hw, IXGBE_GPRC);
+	IXGBE_READ_REG(hw, IXGBE_BPRC);
+	IXGBE_READ_REG(hw, IXGBE_MPRC);
+	IXGBE_READ_REG(hw, IXGBE_GPTC);
+	IXGBE_READ_REG(hw, IXGBE_GORCL);
+	IXGBE_READ_REG(hw, IXGBE_GORCH);
+	IXGBE_READ_REG(hw, IXGBE_GOTCL);
+	IXGBE_READ_REG(hw, IXGBE_GOTCH);
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		for (i = 0; i < 8; i++)
+			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+	IXGBE_READ_REG(hw, IXGBE_RUC);
+	IXGBE_READ_REG(hw, IXGBE_RFC);
+	IXGBE_READ_REG(hw, IXGBE_ROC);
+	IXGBE_READ_REG(hw, IXGBE_RJC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+	IXGBE_READ_REG(hw, IXGBE_TORL);
+	IXGBE_READ_REG(hw, IXGBE_TORH);
+	IXGBE_READ_REG(hw, IXGBE_TPR);
+	IXGBE_READ_REG(hw, IXGBE_TPT);
+	IXGBE_READ_REG(hw, IXGBE_PTC64);
+	IXGBE_READ_REG(hw, IXGBE_PTC127);
+	IXGBE_READ_REG(hw, IXGBE_PTC255);
+	IXGBE_READ_REG(hw, IXGBE_PTC511);
+	IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	IXGBE_READ_REG(hw, IXGBE_MPTC);
+	IXGBE_READ_REG(hw, IXGBE_BPTC);
+	for (i = 0; i < 16; i++) {
+		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		if (hw->mac.type >= ixgbe_mac_82599EB) {
+			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+		} else {
+			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+		}
+	}
+
+	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+		if (hw->phy.id == 0)
+			ixgbe_identify_phy(hw);
+		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
+		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+				  u32 pba_num_size)
+{
+	s32 ret_val;
+	u16 data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	DEBUGFUNC("ixgbe_read_pba_string_generic");
+
+	if (pba_num == NULL) {
+		DEBUGOUT("PBA string buffer was null\n");
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return ret_val;
+	}
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return ret_val;
+	}
+
+	/*
+	 * If data is not the pointer guard, the PBA must be in legacy format,
+	 * which means pba_ptr is actually the second data word of the PBA
+	 * number and we can decode it into an ASCII string.
+	 */
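+	/*
+	 * Illustrative example (values assumed, not from the datasheet):
+	 * data = 0x1234 and pba_ptr = 0x56AB decode to the string
+	 * "123456-0AB" below.
+	 */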
+	if (data != IXGBE_PBANUM_PTR_GUARD) {
+		DEBUGOUT("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			DEBUGOUT("PBA string buffer too small\n");
+			return IXGBE_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (data >> 12) & 0xF;
+		pba_num[1] = (data >> 8) & 0xF;
+		pba_num[2] = (data >> 4) & 0xF;
+		pba_num[3] = data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		return IXGBE_SUCCESS;
+	}
+
+	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		DEBUGOUT("NVM PBA number section invalid length\n");
+		return IXGBE_ERR_PBA_SECTION;
+	}
+
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size  < (((u32)length * 2) - 1)) {
+		DEBUGOUT("PBA string buffer too small\n");
+		return IXGBE_ERR_NO_SPACE;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			return ret_val;
+		}
+		pba_num[offset * 2] = (u8)(data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
+
+	return IXGBE_SUCCESS;
+}
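+
+/*
+ * Usage sketch (illustrative only, not part of the base code): a caller
+ * could retrieve the board's part number with something like
+ *
+ *	u8 pba[32];
+ *
+ *	if (ixgbe_read_pba_string_generic(hw, pba, sizeof(pba)) ==
+ *	    IXGBE_SUCCESS)
+ *		DEBUGOUT1("PBA number: %s\n", pba);
+ *
+ * where a 32-byte buffer is assumed to be large enough for the legacy
+ * 11-character format and typical string-format part numbers.
+ */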
+
+/**
+ *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number from the EEPROM
+ *
+ *  Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("ixgbe_read_pba_num_generic");
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return ret_val;
+	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
+		DEBUGOUT("NVM Not supported\n");
+		return IXGBE_NOT_IMPLEMENTED;
+	}
+	*pba_num = (u32)(data << 16);
+
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		return ret_val;
+	}
+	*pba_num |= data;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @max_pba_block_size: PBA block size limit
+ *  @pba: pointer to output PBA structure
+ *
+ *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+		       u32 eeprom_buf_size, u16 max_pba_block_size,
+		       struct ixgbe_pba *pba)
+{
+	s32 ret_val;
+	u16 pba_block_size;
+
+	if (pba == NULL)
+		return IXGBE_ERR_PARAM;
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						     &pba->word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (pba->pba_block == NULL)
+			return IXGBE_ERR_PARAM;
+
+		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+						   eeprom_buf_size,
+						   &pba_block_size);
+		if (ret_val)
+			return ret_val;
+
+		if (pba_block_size > max_pba_block_size)
+			return IXGBE_ERR_PARAM;
+
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+							     pba_block_size,
+							     pba->pba_block);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > (u32)(pba->word[1] +
+					      pba_block_size)) {
+				memcpy(pba->pba_block,
+				       &eeprom_buf[pba->word[1]],
+				       pba_block_size * sizeof(u16));
+			} else {
+				return IXGBE_ERR_PARAM;
+			}
+		}
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba: pointer to PBA structure
+ *
+ *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+	s32 ret_val;
+
+	if (pba == NULL)
+		return IXGBE_ERR_PARAM;
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						      &pba->word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (pba->pba_block == NULL)
+			return IXGBE_ERR_PARAM;
+
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+							      pba->pba_block[0],
+							      pba->pba_block);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > (u32)(pba->word[1] +
+					      pba->pba_block[0])) {
+				memcpy(&eeprom_buf[pba->word[1]],
+				       pba->pba_block,
+				       pba->pba_block[0] * sizeof(u16));
+			} else {
+				return IXGBE_ERR_PARAM;
+			}
+		}
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_pba_block_size
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba_block_size: pointer to output variable for the PBA block size
+ *
+ *  Returns the size of the PBA block in words. The function operates on the
+ *  EEPROM image if the eeprom_buf pointer is not NULL; otherwise it accesses
+ *  the physical EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			     u32 eeprom_buf_size, u16 *pba_block_size)
+{
+	s32 ret_val;
+	u16 pba_word[2];
+	u16 length;
+
+	DEBUGFUNC("ixgbe_get_pba_block_size");
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						     &pba_word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+						      &length);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > pba_word[1])
+				length = eeprom_buf[pba_word[1] + 0];
+			else
+				return IXGBE_ERR_PARAM;
+		}
+
+		if (length == 0xFFFF || length == 0)
+			return IXGBE_ERR_PBA_SECTION;
+	} else {
+		/* PBA number in legacy format, there is no PBA Block. */
+		length = 0;
+	}
+
+	if (pba_block_size != NULL)
+		*pba_block_size = length;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_mac_addr_generic - Generic get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from the first Receive Address Register
+ *  (RAR0). A reset of the adapter must be performed prior to calling this
+ *  function in order for the MAC address to have been loaded from the EEPROM
+ *  into RAR0.
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_get_mac_addr_generic");
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+	for (i = 0; i < 4; i++)
+		mac_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < 2; i++)
+		mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
+ *  @hw: pointer to hardware structure
+ *  @link_status: the link status returned by the PCI config space
+ *
+ *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	if (hw->bus.type == ixgbe_bus_type_unknown)
+		hw->bus.type = ixgbe_bus_type_pci_express;
+
+	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+	case IXGBE_PCI_LINK_WIDTH_1:
+		hw->bus.width = ixgbe_bus_width_pcie_x1;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_2:
+		hw->bus.width = ixgbe_bus_width_pcie_x2;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_4:
+		hw->bus.width = ixgbe_bus_width_pcie_x4;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_8:
+		hw->bus.width = ixgbe_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = ixgbe_bus_width_unknown;
+		break;
+	}
+
+	switch (link_status & IXGBE_PCI_LINK_SPEED) {
+	case IXGBE_PCI_LINK_SPEED_2500:
+		hw->bus.speed = ixgbe_bus_speed_2500;
+		break;
+	case IXGBE_PCI_LINK_SPEED_5000:
+		hw->bus.speed = ixgbe_bus_speed_5000;
+		break;
+	case IXGBE_PCI_LINK_SPEED_8000:
+		hw->bus.speed = ixgbe_bus_speed_8000;
+		break;
+	default:
+		hw->bus.speed = ixgbe_bus_speed_unknown;
+		break;
+	}
+
+	mac->ops.set_lan_id(hw);
+}
+
+/**
+ *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Gets the PCI bus info (speed, width, type) then calls helper function to
+ *  store this data within the ixgbe_hw structure.
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+	u16 link_status;
+
+	DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+	/* Get the negotiated link width and speed from PCI config space */
+	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+	ixgbe_set_pci_config_data_generic(hw, link_status);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+	struct ixgbe_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
+
+	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+	bus->lan_id = bus->func;
+
+	/* check for a port swap */
+	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+	if (reg & IXGBE_FACTPS_LFS)
+		bus->func ^= 0x1;
+}
+
+/**
+ *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+	u32 reg_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_stop_adapter_generic");
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit */
+	ixgbe_disable_rx(hw);
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts, flush previous writes */
+	IXGBE_READ_REG(hw, IXGBE_EICR);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	for (i = 0; i < hw->mac.max_tx_queues; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+		reg_val &= ~IXGBE_RXDCTL_ENABLE;
+		reg_val |= IXGBE_RXDCTL_SWFLSH;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+	}
+
+	/* flush all queues disables */
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(2);
+
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verifying there are no pending requests
+	 */
+	return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	DEBUGFUNC("ixgbe_led_on_generic");
+
+	/* To turn on the LED, set mode to ON. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	DEBUGFUNC("ixgbe_led_off_generic");
+
+	/* To turn off the LED, set mode to OFF. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->type = ixgbe_eeprom_none;
+		/* Set the default semaphore delay to 10ms, which is a
+		 * well-tested value */
+		eeprom->semaphore_delay = 10;
+		/* Clear EEPROM page size, it will be initialized as needed */
+		eeprom->word_page_size = 0;
+
+		/*
+		 * Check for EEPROM present first.
+		 * If not present leave as none
+		 */
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (eec & IXGBE_EEC_PRES) {
+			eeprom->type = ixgbe_eeprom_spi;
+
+			/*
+			 * SPI EEPROM is assumed here.  This code would need to
+			 * change if a future EEPROM is not SPI.
+			 */
+			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+					    IXGBE_EEC_SIZE_SHIFT);
+			eeprom->word_size = 1 << (eeprom_size +
+					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		}
+
+		if (eec & IXGBE_EEC_ADDR_SIZE)
+			eeprom->address_bits = 16;
+		else
+			eeprom->address_bits = 8;
+		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
+			  "%d\n", eeprom->type, eeprom->word_size,
+			  eeprom->address_bits);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to write
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) to write to EEPROM
+ *
+ *  Writes 16 bit word(s) to the EEPROM through the bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+					       u16 words, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 i, count;
+
+	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (words == 0) {
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	if (offset + words > hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/*
+	 * The EEPROM page size cannot be queried from the chip, so we
+	 * initialize it lazily. It is only worth doing when writing a
+	 * large buffer.
+	 */
+	if ((hw->eeprom.word_page_size == 0) &&
+	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+		ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+	/*
+	 * We cannot hold the synchronization semaphores for too long or we
+	 * risk starving other entities; however, it is more efficient to
+	 * transfer the data in bursts than to synchronize access for each
+	 * word.
+	 */
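+	/*
+	 * Illustrative example (chunk size assumed): with a per-burst limit
+	 * of e.g. 256 words, a 600-word request is issued as bursts of 256,
+	 * 256 and 88 words, re-acquiring the EEPROM for each burst.
+	 */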
+	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+							    count, &data[i]);
+
+		if (status != IXGBE_SUCCESS)
+			break;
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  If ixgbe_eeprom_update_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+					      u16 words, u16 *data)
+{
+	s32 status;
+	u16 word;
+	u16 page_size;
+	u16 i;
+	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
+	/* Prepare the EEPROM for writing  */
+	status = ixgbe_acquire_eeprom(hw);
+
+	if (status == IXGBE_SUCCESS) {
+		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+			ixgbe_release_eeprom(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < words; i++) {
+			ixgbe_standby_eeprom(hw);
+
+			/* Send the WRITE ENABLE command (8 bit opcode) */
+			ixgbe_shift_out_eeprom_bits(hw,
+						   IXGBE_EEPROM_WREN_OPCODE_SPI,
+						   IXGBE_EEPROM_OPCODE_BITS);
+
+			ixgbe_standby_eeprom(hw);
+
+			/*
+			 * Some SPI eeproms use the 8th address bit embedded
+			 * in the opcode
+			 */
+			if ((hw->eeprom.address_bits == 8) &&
+			    ((offset + i) >= 128))
+				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+			/* Send the Write command (8-bit opcode + addr) */
+			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+						    IXGBE_EEPROM_OPCODE_BITS);
+			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+						    hw->eeprom.address_bits);
+
+			page_size = hw->eeprom.word_page_size;
+
+			/* Send the data in bursts via SPI */
+			do {
+				word = data[i];
+				word = (word >> 8) | (word << 8);
+				ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+				if (page_size == 0)
+					break;
+
+				/* do not wrap around page */
+				if (((offset + i) & (page_size - 1)) ==
+				    (page_size - 1))
+					break;
+			} while (++i < words);
+
+			ixgbe_standby_eeprom(hw);
+			msec_delay(10);
+		}
+		/* Done with writing - release the EEPROM */
+		ixgbe_release_eeprom(hw);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word to be written to the EEPROM
+ *
+ *  If ixgbe_eeprom_update_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_write_eeprom_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) read from EEPROM
+ *
+ *  Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+					      u16 words, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 i, count;
+
+	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (words == 0) {
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	if (offset + words > hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	/*
+	 * We cannot hold the synchronization semaphores for too long or we
+	 * risk starving other entities; however, it is more efficient to
+	 * read in bursts than to synchronize access for each word.
+	 */
+	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+							   count, &data[i]);
+
+		if (status != IXGBE_SUCCESS)
+			break;
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @words: number of word(s)
+ *  @data: read 16 bit word(s) from EEPROM
+ *
+ *  Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+					     u16 words, u16 *data)
+{
+	s32 status;
+	u16 word_in;
+	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
+	/* Prepare the EEPROM for reading  */
+	status = ixgbe_acquire_eeprom(hw);
+
+	if (status == IXGBE_SUCCESS) {
+		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+			ixgbe_release_eeprom(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < words; i++) {
+			ixgbe_standby_eeprom(hw);
+			/*
+			 * Some SPI eeproms use the 8th address bit embedded
+			 * in the opcode
+			 */
+			if ((hw->eeprom.address_bits == 8) &&
+			    ((offset + i) >= 128))
+				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+			/* Send the READ command (opcode + addr) */
+			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+						    IXGBE_EEPROM_OPCODE_BITS);
+			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+						    hw->eeprom.address_bits);
+
+			/* Read the data. */
+			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+			data[i] = (word_in >> 8) | (word_in << 8);
+		}
+
+		/* End this read operation */
+		ixgbe_release_eeprom(hw);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: 16 bit value read from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+				       u16 *data)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) from the EEPROM
+ *
+ *  Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+				   u16 words, u16 *data)
+{
+	u32 eerd;
+	s32 status = IXGBE_SUCCESS;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (words == 0) {
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+		goto out;
+	}
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+		       IXGBE_EEPROM_RW_REG_START;
+
+		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+		if (status == IXGBE_SUCCESS) {
+			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+				   IXGBE_EEPROM_RW_REG_DATA);
+		} else {
+			DEBUGOUT("Eeprom read timed out\n");
+			goto out;
+		}
+	}
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ *  Discovers the EEPROM page size by writing marching data at the given
+ *  offset. This function is called only when we are writing a new large
+ *  buffer at the given offset, so the data would be overwritten anyway.
+ **/
+STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+						 u16 offset)
+{
+	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+	s32 status = IXGBE_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+		data[i] = i;
+
+	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+	hw->eeprom.word_page_size = 0;
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	/*
+	 * When writing in bursts larger than the actual page size, the
+	 * EEPROM address wraps around within the current page.
+	 */
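+	/*
+	 * Illustrative example (values assumed): with a scratch write of
+	 * 128 marching words and an actual page size of 16 words, the word
+	 * read back at the scratch offset is 112, so the detected page size
+	 * is 128 - 112 = 16.
+	 */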
+	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+	DEBUGOUT1("Detected EEPROM page size = %d words.",
+		  hw->eeprom.word_page_size);
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) to write to the EEPROM
+ *
+ *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+				    u16 words, u16 *data)
+{
+	u32 eewr;
+	s32 status = IXGBE_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_write_eewr_generic");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (words == 0) {
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
+		goto out;
+	}
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+			IXGBE_EEPROM_RW_REG_START;
+
+		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+		if (status != IXGBE_SUCCESS) {
+			DEBUGOUT("Eeprom write EEWR timed out\n");
+			goto out;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+		if (status != IXGBE_SUCCESS) {
+			DEBUGOUT("Eeprom write EEWR timed out\n");
+			goto out;
+		}
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ *  @hw: pointer to hardware structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ *  read or write is done respectively.
+ **/
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+	u32 i;
+	u32 reg;
+	s32 status = IXGBE_ERR_EEPROM;
+
+	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
+
+	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+		if (ee_reg == IXGBE_NVM_POLL_READ)
+			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+		else
+			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		usec_delay(5);
+	}
+
+	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "EEPROM read/write done polling timed out");
+
+	return status;
+}
+
+/**
+ *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *
+ *  Prepares EEPROM for access using bit-bang method. This function should
+ *  be called before issuing a command to the EEPROM.
+ **/
+STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 eec;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_acquire_eeprom");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+	    != IXGBE_SUCCESS)
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	if (status == IXGBE_SUCCESS) {
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		/* Request EEPROM Access */
+		eec |= IXGBE_EEC_REQ;
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+			if (eec & IXGBE_EEC_GNT)
+				break;
+			usec_delay(5);
+		}
+
+		/* Release if grant not acquired */
+		if (!(eec & IXGBE_EEC_GNT)) {
+			eec &= ~IXGBE_EEC_REQ;
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+			DEBUGOUT("Could not acquire EEPROM grant\n");
+
+			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+			status = IXGBE_ERR_EEPROM;
+		}
+
+		/* Setup EEPROM for Read/Write */
+		if (status == IXGBE_SUCCESS) {
+			/* Clear CS and SK */
+			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+			IXGBE_WRITE_FLUSH(hw);
+			usec_delay(1);
+		}
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM;
+	u32 timeout = 2000;
+	u32 i;
+	u32 swsm;
+
+	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
+
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		usec_delay(50);
+	}
+
+	if (i == timeout) {
+		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+			 "not granted.\n");
+		/*
+		 * this release is particularly important because our attempts
+		 * above to get the semaphore may have succeeded, and if there
+		 * was a timeout, we should unconditionally clear the semaphore
+		 * bits to free the driver to make progress
+		 */
+		ixgbe_release_eeprom_semaphore(hw);
+
+		usec_delay(50);
+		/*
+		 * one last try
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI))
+			status = IXGBE_SUCCESS;
+	}
+
+	/* Now get the semaphore between SW/FW through the SWESMBI bit */
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+			/* Set the SW EEPROM semaphore bit to request access */
+			swsm |= IXGBE_SWSM_SWESMBI;
+			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+			/*
+			 * If we set the bit successfully then we got the
+			 * semaphore.
+			 */
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+			if (swsm & IXGBE_SWSM_SWESMBI)
+				break;
+
+			usec_delay(50);
+		}
+
+		/*
+		 * Release semaphores and return error if SW EEPROM semaphore
+		 * was not granted because we don't have access to the EEPROM
+		 */
+		if (i >= timeout) {
+			ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			    "SWESMBI Software EEPROM semaphore not granted.\n");
+			ixgbe_release_eeprom_semaphore(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	} else {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Software semaphore SMBI between device drivers "
+			     "not granted.\n");
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_ready_eeprom - Polls for EEPROM ready
+ *  @hw: pointer to hardware structure
+ **/
+STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 i;
+	u8 spi_stat_reg;
+
+	DEBUGFUNC("ixgbe_ready_eeprom");
+
+	/*
+	 * Read "Status Register" repeatedly until the LSB is cleared.  The
+	 * EEPROM will signal that the command has been completed by clearing
+	 * bit 0 of the internal status register.  If it's not cleared within
+	 * 5 milliseconds, then error out.
+	 */
+	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+					    IXGBE_EEPROM_OPCODE_BITS);
+		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+			break;
+
+		usec_delay(5);
+		ixgbe_standby_eeprom(hw);
+	}
+
+	/*
+	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+	 * devices (and only 0-5mSec on 5V devices)
+	 */
+	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+		DEBUGOUT("SPI EEPROM Status error\n");
+		status = IXGBE_ERR_EEPROM;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ *  @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	DEBUGFUNC("ixgbe_standby_eeprom");
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/* Toggle CS to flush commands */
+	eec |= IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+	eec &= ~IXGBE_EEC_CS;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/**
+ *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ *  @hw: pointer to hardware structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ **/
+STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+					u16 count)
+{
+	u32 eec;
+	u32 mask;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	/*
+	 * Mask is used to shift "count" bits of "data" out to the EEPROM
+	 * one bit at a time.  Determine the starting bit based on count
+	 */
+	mask = 0x01 << (count - 1);
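+	/*
+	 * Illustrative example: for an 8-bit opcode (count == 8) the mask
+	 * starts at 0x80, so the most significant bit is clocked out first.
+	 */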
+
+	for (i = 0; i < count; i++) {
+		/*
+		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+		 * "1", and then raising and then lowering the clock (the SK
+		 * bit controls the clock input to the EEPROM).  A "0" is
+		 * shifted out to the EEPROM by setting "DI" to "0" and then
+		 * raising and then lowering the clock.
+		 */
+		if (data & mask)
+			eec |= IXGBE_EEC_DI;
+		else
+			eec &= ~IXGBE_EEC_DI;
+
+		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+		IXGBE_WRITE_FLUSH(hw);
+
+		usec_delay(1);
+
+		ixgbe_raise_eeprom_clk(hw, &eec);
+		ixgbe_lower_eeprom_clk(hw, &eec);
+
+		/*
+		 * Shift the mask to select the next bit of data to shift
+		 * out to the EEPROM
+		 */
+		mask = mask >> 1;
+	}
+
+	/* We leave the "DI" bit set to "0" when we leave this routine. */
+	eec &= ~IXGBE_EEC_DI;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @count: number of bits to shift in
+ **/
+STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+	u32 eec;
+	u32 i;
+	u16 data = 0;
+
+	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
+
+	/*
+	 * In order to read a register from the EEPROM, we need to shift
+	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+	 * the clock input to the EEPROM (setting the SK bit), and then reading
+	 * the value of the "DO" bit.  During this "shifting in" process the
+	 * "DI" bit should always be clear.
+	 */
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+	for (i = 0; i < count; i++) {
+		data = data << 1;
+		ixgbe_raise_eeprom_clk(hw, &eec);
+
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		eec &= ~(IXGBE_EEC_DI);
+		if (eec & IXGBE_EEC_DO)
+			data |= 1;
+
+		ixgbe_lower_eeprom_clk(hw, &eec);
+	}
+
+	return data;
+}
+
+/**
+ *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eec: EEC register's current value
+ **/
+STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	DEBUGFUNC("ixgbe_raise_eeprom_clk");
+
+	/*
+	 * Raise the clock input to the EEPROM
+	 * (setting the SK bit), then delay
+	 */
+	*eec = *eec | IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/**
+ *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eec: EEC register's current value
+ **/
+STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+	DEBUGFUNC("ixgbe_lower_eeprom_clk");
+
+	/*
+	 * Lower the clock input to the EEPROM (clearing the SK bit), then
+	 * delay
+	 */
+	*eec = *eec & ~IXGBE_EEC_SK;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(1);
+}
+
+/**
+ *  ixgbe_release_eeprom - Release EEPROM, release semaphores
+ *  @hw: pointer to hardware structure
+ **/
+STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+	u32 eec;
+
+	DEBUGFUNC("ixgbe_release_eeprom");
+
+	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+	eec |= IXGBE_EEC_CS;  /* Pull CS high */
+	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+	IXGBE_WRITE_FLUSH(hw);
+
+	usec_delay(1);
+
+	/* Stop requesting EEPROM access */
+	eec &= ~IXGBE_EEC_REQ;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+	/* Delay before attempt to obtain semaphore again to allow FW access */
+	msec_delay(hw->eeprom.semaphore_delay);
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+	u16 i;
+	u16 j;
+	u16 checksum = 0;
+	u16 length = 0;
+	u16 pointer = 0;
+	u16 word = 0;
+
+	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
+
+	/*
+	 * Include EEPROM words 0x00-0x3E in the checksum; word 0x3F holds
+	 * the checksum itself.
+	 */
+	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+		if (hw->eeprom.ops.read(hw, i, &word)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+		checksum += word;
+	}
+
+	/* Include all data from pointers except for the fw pointer */
+	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+		if (hw->eeprom.ops.read(hw, i, &pointer)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		/* If the pointer seems invalid */
+		if (pointer == 0xFFFF || pointer == 0)
+			continue;
+
+		if (hw->eeprom.ops.read(hw, pointer, &length)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		if (length == 0xFFFF || length == 0)
+			continue;
+
+		for (j = pointer + 1; j <= pointer + length; j++) {
+			if (hw->eeprom.ops.read(hw, j, &word)) {
+				DEBUGOUT("EEPROM read failed\n");
+				return IXGBE_ERR_EEPROM;
+			}
+			checksum += word;
+		}
+	}
+
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
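+	/*
+	 * The checksum is chosen so that the 16-bit sum of the covered words
+	 * plus the checksum equals IXGBE_EEPROM_SUM (illustrative example:
+	 * if the words sum to 0x1234, the value returned here is
+	 * IXGBE_EEPROM_SUM - 0x1234).
+	 */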
+
+	return (s32)checksum;
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+					   u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (read_checksum != checksum)
+		status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
+	return status;
+}
+
+/**
+ *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 checksum;
+
+	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
+	return status;
+}
+
+/**
+ *  ixgbe_validate_mac_addr - Validate MAC address
+ *  @mac_addr: pointer to MAC address.
+ *
+ *  Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_validate_mac_addr");
+
+	/* Make sure it is not a multicast address */
+	if (IXGBE_IS_MULTICAST(mac_addr)) {
+		DEBUGOUT("MAC address is multicast\n");
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Not a broadcast address */
+	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
+		DEBUGOUT("MAC address is broadcast\n");
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Reject the zero address */
+	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+		DEBUGOUT("MAC address is all zeros\n");
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_set_rar_generic - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set" or "pool" index
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+			  u32 enable_addr)
+{
+	u32 rar_low, rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_set_rar_generic");
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	/* setup VMDq pool selection before this RAR gets enabled */
+	hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+	/*
+	 * HW expects these in little endian so we reverse the byte
+	 * order from network order (big endian) to little endian
+	 */
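+	/*
+	 * Illustrative example (address assumed): for MAC 00:1b:21:aa:bb:cc,
+	 * addr[0] = 0x00 ... addr[5] = 0xcc, so rar_low = 0xaa211b00 and the
+	 * low 16 bits of rar_high become 0xccbb.
+	 */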
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	if (enable_addr != 0)
+		rar_high |= IXGBE_RAH_AV;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_rar_generic - Remove Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 rar_high;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_clear_rar_generic");
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+	/* clear VMDq pool/queue selection for this RAR */
+	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
+
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+	    IXGBE_ERR_INVALID_MAC_ADDR) {
+		/* Get the MAC address from the RAR0 for later reference */
+		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2]);
+		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+	} else {
+		/* Setup the receive address. */
+		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2]);
+		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+		/* clear VMDq pool/queue selection for RAR 0 */
+		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
+	for (i = 1; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mta_in_use = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	DEBUGOUT(" Clearing MTA\n");
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	ixgbe_init_uta_tables(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_add_uc_addr - Adds a secondary unicast address.
+ *  @hw: pointer to hardware structure
+ *  @addr: new address
+ *  @vmdq: VMDq pool to assign to the address
+ *
+ *  Adds the address to an unused receive address register; if none are free,
+ *  the adapter falls back to promiscuous mode.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	u32 rar_entries = hw->mac.num_rar_entries;
+	u32 rar;
+
+	DEBUGFUNC("ixgbe_add_uc_addr");
+
+	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+	/*
+	 * Place this address in the RAR if there is room,
+	 * else put the controller into promiscuous mode
+	 */
+	if (hw->addr_ctrl.rar_used_count < rar_entries) {
+		rar = hw->addr_ctrl.rar_used_count;
+		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
+		hw->addr_ctrl.rar_used_count++;
+	} else {
+		hw->addr_ctrl.overflow_promisc++;
+	}
+
+	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new addresses
+ *  @addr_count: number of addresses
+ *  @next: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list.  Clears the secondary addrs from
+ *  receive address registers.  Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ *  Drivers using secondary unicast addresses must set user_set_promisc when
+ *  manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+				      u32 addr_count, ixgbe_mc_addr_itr next)
+{
+	u8 *addr;
+	u32 i;
+	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+	u32 uc_addr_in_use;
+	u32 fctrl;
+	u32 vmdq;
+
+	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
+
+	/*
+	 * Clear accounting of old secondary address list,
+	 * don't count RAR[0]
+	 */
+	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+	hw->addr_ctrl.overflow_promisc = 0;
+
+	/* Zero out the other receive addresses */
+	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+	for (i = 0; i < uc_addr_in_use; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+	}
+
+	/* Add the new addresses */
+	for (i = 0; i < addr_count; i++) {
+		DEBUGOUT(" Adding the secondary addresses:\n");
+		addr = next(hw, &addr_list, &vmdq);
+		ixgbe_add_uc_addr(hw, addr, vmdq);
+	}
+
+	if (hw->addr_ctrl.overflow_promisc) {
+		/* enable promisc if not already in overflow or set by user */
+		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			DEBUGOUT(" Entering address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl |= IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	} else {
+		/* only disable if set by overflow, not by user */
+		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+			DEBUGOUT(" Leaving address overflow promisc mode\n");
+			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+			fctrl &= ~IXGBE_FCTRL_UPE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+		}
+	}
+
+	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits from a multicast address that determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits from
+ *  incoming RX multicast addresses to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ *  by the MO field of MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	DEBUGFUNC("ixgbe_mta_vector");
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		DEBUGOUT("MC filter type param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
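+	/*
+	 * Illustrative example (values assumed): with filter type 0 and
+	 * mc_addr[4] = 0xCD, mc_addr[5] = 0xAB, the vector is
+	 * (0xCD >> 4) | (0xAB << 4) = 0xABC.
+	 */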
+	return vector;
+}
+
+/**
+ *  ixgbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: Multicast address
+ *
+ *  Sets the bit-vector in the multicast table.
+ **/
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+
+	DEBUGFUNC("ixgbe_set_mta");
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = ixgbe_mta_vector(hw, mc_addr);
+	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.  We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value.  The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register is determined by the lower 5 bits of
+	 * the value.
+	 */
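+	/*
+	 * Worked example (illustrative only): for vector 0xABC,
+	 * vector_reg = (0xABC >> 5) & 0x7F = 0x55 and
+	 * vector_bit = 0xABC & 0x1F = 0x1C, i.e. bit 28 of MTA[85].
+	 */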
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @next: iterator function to walk the multicast address list
+ *  @clear: flag, when set clears the table beforehand
+ *
+ *  When the clear flag is set, the given list replaces any existing list.
+ *  Hashes the given addresses into the multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
+				      bool clear)
+{
+	u32 i;
+	u32 vmdq;
+
+	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Clear mta_shadow */
+	if (clear) {
+		DEBUGOUT(" Clearing MTA\n");
+		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+	}
+
+	/* Update mta_shadow */
+	for (i = 0; i < mc_addr_count; i++) {
+		DEBUGOUT(" Adding the multicast addresses:\n");
+		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+	}
+
+	/* Enable mta */
+	for (i = 0; i < hw->mac.mcft_size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+				      hw->mac.mta_shadow[i]);
+
+	if (hw->addr_ctrl.mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_mc_generic - Enable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+	DEBUGFUNC("ixgbe_enable_mc_generic");
+
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+				hw->mac.mc_filter_type);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_disable_mc_generic - Disable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+	DEBUGFUNC("ixgbe_disable_mc_generic");
+
+	if (a->mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fc_enable_generic - Enable flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 mflcn_reg, fccfg_reg;
+	u32 reg;
+	u32 fcrtl, fcrth;
+	int i;
+
+	DEBUGFUNC("ixgbe_fc_enable_generic");
+
+	/* Validate the water mark configuration */
+	if (!hw->fc.pause_time) {
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/* Low water mark of zero causes XOFF floods */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				DEBUGOUT("Invalid water mark configuration\n");
+				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
+	/* Negotiate the fc mode to use */
+	ixgbe_fc_autoneg(hw);
+
+	/* Disable any previous flow control settings */
+	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+	/*
+	 * The possible values of fc.current_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.current_mode) {
+	case ixgbe_fc_none:
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		mflcn_reg |= IXGBE_MFLCN_RFCE;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		mflcn_reg |= IXGBE_MFLCN_RFCE;
+		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+		break;
+	default:
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+			     "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	/* Set 802.3x based flow control settings. */
+	mflcn_reg |= IXGBE_MFLCN_DPF;
+	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+
+	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+			/*
+			 * In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the Rx packet buffer size - 24KB.  This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
+			 */
+			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+	}
+
+	/* Configure pause time (2 TCs per register) */
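+	/* Multiplying the 16-bit pause time by 0x00010001 replicates it into
+	 * both halves of the 32-bit FCTTV register, e.g. 0x0680 becomes
+	 * 0x06800680.
+	 */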
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_negotiate_fc - Negotiate flow control
+ *  @hw: pointer to hardware structure
+ *  @adv_reg: flow control advertised settings
+ *  @lp_reg: link partner's flow control settings
+ *  @adv_sym: symmetric pause bit in advertisement
+ *  @adv_asm: asymmetric pause bit in advertisement
+ *  @lp_sym: symmetric pause bit in link partner advertisement
+ *  @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ *  Find the intersection between advertised settings and link partner's
+ *  advertised settings
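+ *
+ *  Resolution follows the cases handled below: if both sides advertise
+ *  symmetric pause, the result is full when full was requested and rx_pause
+ *  otherwise; a local asym-only advertisement against a partner advertising
+ *  sym+asym resolves to tx_pause; a local sym+asym advertisement against a
+ *  partner advertising asym-only resolves to rx_pause; anything else is none.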
+ **/
+STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+	if ((!(adv_reg)) ||  (!(lp_reg))) {
+		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+			     "Local or link partner's advertised flow control "
+			     "settings are NULL. Local: %x, link partner: %x\n",
+			     adv_reg, lp_reg);
+		return IXGBE_ERR_FC_NOT_NEGOTIATED;
+	}
+
+	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+		/*
+		 * Now we need to check if the user selected Rx ONLY
+		 * pause frames.  In that case, we had to advertise
+		 * FULL flow control because we could not advertise Rx
+		 * ONLY. Hence, we must now check whether we need to
+		 * turn OFF the TRANSMISSION of PAUSE frames.
+		 */
+		if (hw->fc.requested_mode == ixgbe_fc_full) {
+			hw->fc.current_mode = ixgbe_fc_full;
+			DEBUGOUT("Flow Control = FULL.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_rx_pause;
+			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+		}
+	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_tx_pause;
+		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+		hw->fc.current_mode = ixgbe_fc_rx_pause;
+		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+	} else {
+		hw->fc.current_mode = ixgbe_fc_none;
+		DEBUGOUT("Flow Control = NONE.\n");
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control on 1 gig fiber according to the PCS autonegotiation
+ *  result.
+ **/
+STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	/*
+	 * On multispeed fiber at 1g, bail out if
+	 * - link is up but AN did not complete, or if
+	 * - link is up and AN completed but timed out
+	 */
+
+	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
+		goto out;
+	}
+
+	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+				      IXGBE_PCS1GANA_ASM_PAUSE,
+				      IXGBE_PCS1GANA_SYM_PAUSE,
+				      IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+	u32 links2, anlp1_reg, autoc_reg, links;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	/*
+	 * On backplane, bail out if
+	 * - backplane autoneg was not completed, or if
+	 * - we are 82599 and link partner is not AN enabled
+	 */
+	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+		DEBUGOUT("Auto-Negotiation did not complete\n");
+		goto out;
+	}
+
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+			DEBUGOUT("Link partner is not AN enabled\n");
+			goto out;
+		}
+	}
+	/*
+	 * Read the 10g AN autoc and LP ability registers and resolve
+	 * local flow control settings accordingly
+	 */
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+	u16 technology_ability_reg = 0;
+	u16 lp_technology_ability_reg = 0;
+
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+			     &technology_ability_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+			     &lp_technology_ability_reg);
+
+	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+				  (u32)lp_technology_ability_reg,
+				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ *  ixgbe_fc_autoneg - Configure flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Compares our advertised flow control capabilities to those advertised by
+ *  our link partner, and determines the proper flow control mode to use.
+ **/
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	ixgbe_link_speed speed;
+	bool link_up;
+
+	DEBUGFUNC("ixgbe_fc_autoneg");
+
+	/*
+	 * AN should have completed when the cable was plugged in.
+	 * Look for reasons to bail out.  Bail out if:
+	 * - FC autoneg is disabled, or if
+	 * - link is not up.
+	 */
+	if (hw->fc.disable_fc_autoneg) {
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+			     "Flow control autoneg is disabled");
+		goto out;
+	}
+
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+	if (!link_up) {
+		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+		goto out;
+	}
+
+	switch (hw->phy.media_type) {
+	/* Autoneg flow control on fiber adapters */
+	case ixgbe_media_type_fiber_qsfp:
+	case ixgbe_media_type_fiber:
+		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+			ret_val = ixgbe_fc_autoneg_fiber(hw);
+		break;
+
+	/* Autoneg flow control on backplane adapters */
+	case ixgbe_media_type_backplane:
+		ret_val = ixgbe_fc_autoneg_backplane(hw);
+		break;
+
+	/* Autoneg flow control on copper adapters */
+	case ixgbe_media_type_copper:
+		if (ixgbe_device_supports_autoneg_fc(hw))
+			ret_val = ixgbe_fc_autoneg_copper(hw);
+		break;
+
+	default:
+		break;
+	}
+
+out:
+	if (ret_val == IXGBE_SUCCESS) {
+		hw->fc.fc_was_autonegged = true;
+	} else {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
+	}
+}
+
+/*
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec.  Never return less than
+ * 800 = 80 millisec.
+ */
+STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+	s16 devctl2;
+	u32 pollcnt;
+
+	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+	switch (devctl2) {
+	case IXGBE_PCIDEVCTRL2_65_130ms:
+		pollcnt = 1300;		/* 130 millisec */
+		break;
+	case IXGBE_PCIDEVCTRL2_260_520ms:
+		pollcnt = 5200;		/* 520 millisec */
+		break;
+	case IXGBE_PCIDEVCTRL2_1_2s:
+		pollcnt = 20000;	/* 2 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_4_8s:
+		pollcnt = 80000;	/* 8 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_17_34s:
+		pollcnt = 340000;	/* 34 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
+	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
+	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
+	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
+	default:
+		pollcnt = 800;		/* 80 millisec minimum */
+		break;
+	}
+
+	/* add 10% to spec maximum */
+	return (pollcnt * 11) / 10;
+}
+
+/**
+ *  ixgbe_disable_pcie_master - Disable PCI-express master access
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
+ *  bit fails to stop all outstanding master requests, otherwise returns
+ *  IXGBE_SUCCESS.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 i, poll;
+	u16 value;
+
+	DEBUGFUNC("ixgbe_disable_pcie_master");
+
+	/* Always set this bit to ensure any future transactions are blocked */
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+	/* Exit if master requests are blocked */
+	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+	    IXGBE_REMOVED(hw->hw_addr))
+		goto out;
+
+	/* Poll for master request bit to clear */
+	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+		usec_delay(100);
+		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+			goto out;
+	}
+
+	/*
+	 * Two consecutive resets are required via CTRL.RST per datasheet
+	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+	 * of this need.  The first reset prevents new master requests from
+	 * being issued by our device.  We then must wait 1usec or more for any
+	 * remaining completions from the PCIe bus to trickle in, and then reset
+	 * again to clear out any effects they may have had on our device.
+	 */
+	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+	/*
+	 * Before proceeding, make sure that the PCIe block does not have
+	 * transactions pending.
+	 */
+	poll = ixgbe_pcie_timeout_poll(hw);
+	for (i = 0; i < poll; i++) {
+		usec_delay(100);
+		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+		if (IXGBE_REMOVED(hw->hw_addr))
+			goto out;
+		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+			goto out;
+	}
+
+	ERROR_REPORT1(IXGBE_ERROR_POLLING,
+		     "PCIe transaction pending bit also did not clear.\n");
+	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 gssr = 0;
+	u32 swmask = mask;
+	u32 fwmask = mask << 5;
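+	/* the corresponding FW ownership bits sit 5 positions above the SW bits */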
+	u32 timeout = 200;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_acquire_swfw_sync");
+
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
+		 */
+		if (ixgbe_get_eeprom_semaphore(hw))
+			return IXGBE_ERR_SWFW_SYNC;
+
+		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+		if (!(gssr & (fwmask | swmask))) {
+			gssr |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+			ixgbe_release_eeprom_semaphore(hw);
+			return IXGBE_SUCCESS;
+		} else {
+			/* Resource is currently in use by FW or SW */
+			ixgbe_release_eeprom_semaphore(hw);
+			msec_delay(5);
+		}
+	}
+
+	/* If time expired clear the bits holding the lock and retry */
+	if (gssr & (fwmask | swmask))
+		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
+
+	msec_delay(5);
+	return IXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ *  ixgbe_release_swfw_sync - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 gssr;
+	u32 swmask = mask;
+
+	DEBUGFUNC("ixgbe_release_swfw_sync");
+
+	ixgbe_get_eeprom_semaphore(hw);
+
+	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+	gssr &= ~swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+	ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally empty
+ *  the Rx security block
+ **/
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECRX_POLL 40
+
+	int i;
+	int secrxreg;
+
+	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+
+	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+			break;
+		else
+			/* Use interrupt-safe sleep just in case */
+			usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECRX_POLL)
+		DEBUGOUT("Rx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ *  @hw: pointer to hardware structure
+ *  @locked: return value, set false here since no SW/FW lock is taken
+ *  @reg_val: value read from AUTOC
+ *
+ *  The default case requires no protection, so just do the register read.
+ */
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+	*locked = false;
+	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *           previous read.
+ *
+ * The default case requires no protection, so just do the register write.
+ */
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+	UNREFERENCED_1PARAMETER(locked);
+
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+{
+	int secrxreg;
+
+	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
+
+	if (regval & IXGBE_RXCTRL_RXEN)
+		ixgbe_enable_rx(hw);
+	else
+		ixgbe_disable_rx(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_blink_led_start_generic - Blink LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+	ixgbe_link_speed speed = 0;
+	bool link_up = 0;
+	u32 autoc_reg = 0;
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = IXGBE_SUCCESS;
+	bool locked = false;
+
+	DEBUGFUNC("ixgbe_blink_led_start_generic");
+
+	/*
+	 * Link must be up to auto-blink the LEDs;
+	 * Force it if link is down.
+	 */
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+	if (!link_up) {
+		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+		if (ret_val != IXGBE_SUCCESS)
+			goto out;
+
+		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+		autoc_reg |= IXGBE_AUTOC_FLU;
+
+		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+		if (ret_val != IXGBE_SUCCESS)
+			goto out;
+
+		IXGBE_WRITE_FLUSH(hw);
+		msec_delay(10);
+	}
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 autoc_reg = 0;
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = IXGBE_SUCCESS;
+	bool locked = false;
+
+	DEBUGFUNC("ixgbe_blink_led_stop_generic");
+
+	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	autoc_reg &= ~IXGBE_AUTOC_FLU;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg &= ~IXGBE_LED_BLINK(index);
+	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_offset: SAN MAC address offset
+ *
+ *  This function will read the EEPROM location for the SAN MAC address
+ *  pointer, and returns the value at that location.  This is used in both
+ *  get and set mac_addr routines.
+ **/
+STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+					 u16 *san_mac_offset)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
+
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.
+	 */
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+				      san_mac_offset);
+	if (ret_val) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom at offset %d failed",
+			      IXGBE_SAN_MAC_ADDR_PTR);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ *  set_lan_id() is called by identify_sfp(), but this cannot be relied
+ *  upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
+
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.  If they're not, no point in calling set_lan_id() here.
+	 */
+	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		goto san_mac_addr_out;
+
+	/* make sure we know which port we need to program */
+	hw->mac.ops.set_lan_id(hw);
+	/* apply the port offset to the address offset */
+	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+	for (i = 0; i < 3; i++) {
+		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+					      &san_mac_data);
+		if (ret_val) {
+			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+				      "eeprom read at offset %d failed",
+				      san_mac_offset);
+			goto san_mac_addr_out;
+		}
+		san_mac_addr[i * 2] = (u8)(san_mac_data);
+		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+		san_mac_offset++;
+	}
+	return IXGBE_SUCCESS;
+
+san_mac_addr_out:
+	/*
+	 * No addresses available in this EEPROM.  It's not an
+	 * error though, so just wipe the local address and return.
+	 */
+	for (i = 0; i < 6; i++)
+		san_mac_addr[i] = 0xFF;
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Write a SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	s32 ret_val;
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+
+	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
+
+	/* Look for SAN mac address pointer.  If not defined, return */
+	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		return IXGBE_ERR_NO_SAN_ADDR_PTR;
+
+	/* Make sure we know which port we need to write */
+	hw->mac.ops.set_lan_id(hw);
+	/* Apply the port offset to the address offset */
+	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+
+	for (i = 0; i < 3; i++) {
+		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+		san_mac_data |= (u16)(san_mac_addr[i * 2]);
+		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+		san_mac_offset++;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.
+ **/
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+	u16 msix_count = 1;
+	u16 max_msix_count;
+	u16 pcie_offset;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+		break;
+	default:
+		return msix_count;
+	}
+
+	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
+	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+	if (IXGBE_REMOVED(hw->hw_addr))
+		msix_count = 0;
+	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+	/* MSI-X count is zero-based in HW */
+	msix_count++;
+
+	if (msix_count > max_msix_count)
+		msix_count = max_msix_count;
+
+	return msix_count;
+}
+
+/**
+ *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the rar that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+	u32 rar;
+	u32 rar_low, rar_high;
+	u32 addr_low, addr_high;
+
+	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
+
+	/* swap bytes for HW little endian */
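+	/* e.g. addr 00:1b:21:9a:bc:de gives addr_low = 0x9a211b00 and
+	 * addr_high = 0xdebc
+	 */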
+	addr_low  = addr[0] | (addr[1] << 8)
+			    | (addr[2] << 16)
+			    | (addr[3] << 24);
+	addr_high = addr[4] | (addr[5] << 8);
+
+	/*
+	 * Either find the mac_id in rar or find the first empty space.
+	 * rar_highwater points to just after the highest currently used
+	 * rar in order to shorten the search.  It grows when we add a new
+	 * rar to the top.
+	 */
+	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+		if (((IXGBE_RAH_AV & rar_high) == 0)
+		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+			first_empty_rar = rar;
+		} else if ((rar_high & 0xFFFF) == addr_high) {
+			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+			if (rar_low == addr_low)
+				break;    /* found it already in the rars */
+		}
+	}
+
+	if (rar < hw->mac.rar_highwater) {
+		/* already there so just add to the pool bits */
+		ixgbe_set_vmdq(hw, rar, vmdq);
+	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+		/* stick it into first empty RAR slot we found */
+		rar = first_empty_rar;
+		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+	} else if (rar == hw->mac.rar_highwater) {
+		/* add it to the top of the list and inc the highwater mark */
+		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+		hw->mac.rar_highwater++;
+	} else if (rar >= hw->mac.num_rar_entries) {
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+	}
+
+	/*
+	 * If we found rar[0], make sure the default pool bit (we use pool 0)
+	 * remains cleared to be sure default pool packets will get delivered
+	 */
+	if (rar == 0)
+		ixgbe_clear_vmdq(hw, rar, 0);
+
+	return rar;
+}
+
+/**
+ *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 mpsar_lo, mpsar_hi;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_clear_vmdq_generic");
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+	if (IXGBE_REMOVED(hw->hw_addr))
+		goto done;
+
+	if (!mpsar_lo && !mpsar_hi)
+		goto done;
+
+	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+		if (mpsar_lo) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+			mpsar_lo = 0;
+		}
+		if (mpsar_hi) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+			mpsar_hi = 0;
+		}
+	} else if (vmdq < 32) {
+		mpsar_lo &= ~(1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+	} else {
+		mpsar_hi &= ~(1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+	}
+
+	/* was that the last pool using this rar? */
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		hw->mac.ops.clear_rar(hw, rar);
+done:
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 mpsar;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	DEBUGFUNC("ixgbe_set_vmdq_generic");
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	if (vmdq < 32) {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+		mpsar |= 1 << vmdq;
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+	} else {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+		mpsar |= 1 << (vmdq - 32);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @vmdq: VMDq pool index
+ *
+ *  This function should only be used in IOV mode.  In IOV mode the default
+ *  pool is the first pool after the number of VFs advertised, not pool 0.
+ *  The MPSAR table needs to be updated for the SAN_MAC RAR
+ *  [hw->mac.san_mac_rar_index].
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+	u32 rar = hw->mac.san_mac_rar_index;
+
+	DEBUGFUNC("ixgbe_set_vmdq_san_mac_generic");
+
+	if (vmdq < 32) {
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+	int i;
+
+	DEBUGFUNC("ixgbe_init_uta_tables_generic");
+	DEBUGOUT(" Clearing UTA\n");
+
+	for (i = 0; i < 128; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *
+ *  return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+	u32 bits = 0;
+	u32 first_empty_slot = 0;
+	s32 regindex;
+
+	/* short cut the special case */
+	if (vlan == 0)
+		return 0;
+
+	/*
+	 * Search for the vlan id in the VLVF entries. Save off the first empty
+	 * slot found along the way.
+	 */
+	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+		if (!bits && !(first_empty_slot))
+			first_empty_slot = regindex;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	/*
+	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+	 * in the VLVF. Else use the first empty VLVF register for this
+	 * vlan id.
+	 */
+	if (regindex >= IXGBE_VLVF_ENTRIES) {
+		if (first_empty_slot)
+			regindex = first_empty_slot;
+		else {
+			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+				     "No space in VLVF.\n");
+			regindex = IXGBE_ERR_NO_SPACE;
+		}
+	}
+
+	return regindex;
+}
+
+/**
+ *  ixgbe_set_vfta_generic - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			   bool vlan_on)
+{
+	s32 regindex;
+	u32 bitindex;
+	u32 vfta;
+	u32 targetbit;
+	s32 ret_val = IXGBE_SUCCESS;
+	bool vfta_changed = false;
+
+	DEBUGFUNC("ixgbe_set_vfta_generic");
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/*
+	 * this is a 2 part operation - first the VFTA, then the
+	 * VLVF and VLVFB if VT Mode is set
+	 * We don't write the VFTA until we know the VLVF part succeeded.
+	 */
+
+	/* Part 1
+	 * The VFTA is a bitstring made up of 128 32-bit registers
+	 * that enable the particular VLAN id, much like the MTA:
+	 *    bits[11-5]: which register
+	 *    bits[4-0]:  which bit in the register
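+	 *    e.g. VLAN id 100 maps to register 3 (100 >> 5), bit 4 (100 & 0x1F)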
+	 */
+	regindex = (vlan >> 5) & 0x7F;
+	bitindex = vlan & 0x1F;
+	targetbit = (1 << bitindex);
+	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+	if (vlan_on) {
+		if (!(vfta & targetbit)) {
+			vfta |= targetbit;
+			vfta_changed = true;
+		}
+	} else {
+		if ((vfta & targetbit)) {
+			vfta &= ~targetbit;
+			vfta_changed = true;
+		}
+	}
+
+	/* Part 2
+	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
+	 */
+	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
+					 &vfta_changed);
+	if (ret_val != IXGBE_SUCCESS)
+		return ret_val;
+
+	if (vfta_changed)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
+ *                 should be changed
+ *
+ *  Turn on/off specified bit in VLVF table.
+ **/
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			    bool vlan_on, bool *vfta_changed)
+{
+	u32 vt;
+
+	DEBUGFUNC("ixgbe_set_vlvf_generic");
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/* If VT Mode is set
+	 *   Either vlan_on
+	 *     make sure the vlan is in VLVF
+	 *     set the vind bit in the matching VLVFB
+	 *   Or !vlan_on
+	 *     clear the pool bit and possibly the vind
+	 */
+	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+		s32 vlvf_index;
+		u32 bits;
+
+		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+		if (vlvf_index < 0)
+			return vlvf_index;
+
+		if (vlan_on) {
+			/* set the pool bit */
+			if (vind < 32) {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index * 2));
+				bits |= (1 << vind);
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB(vlvf_index * 2),
+						bits);
+			} else {
+				bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+				bits |= (1 << (vind - 32));
+				IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1),
+					bits);
+			}
+		} else {
+			/* clear the pool bit */
+			if (vind < 32) {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index * 2));
+				bits &= ~(1 << vind);
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB(vlvf_index * 2),
+						bits);
+				bits |= IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+			} else {
+				bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+				bits &= ~(1 << (vind - 32));
+				IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1),
+					bits);
+				bits |= IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index * 2));
+			}
+		}
+
+		/*
+		 * If there are still bits set in the VLVFB registers
+		 * for the VLAN ID indicated we need to see if the
+		 * caller is requesting that we clear the VFTA entry bit.
+		 * If the caller has requested that we clear the VFTA
+		 * entry bit but there are still pools/VFs using this VLAN
+		 * ID entry then ignore the request.  We're not worried
+		 * about the case where we're turning the VFTA VLAN ID
+		 * entry bit on, only when requested to turn it off as
+		 * there may be multiple pools and/or VFs using the
+		 * VLAN ID entry.  In that case we cannot clear the
+		 * VFTA bit until all pools/VFs using that VLAN ID have also
+		 * been cleared.  This will be indicated by "bits" being
+		 * zero.
+		 */
+		if (bits) {
+			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+					(IXGBE_VLVF_VIEN | vlan));
+			if ((!vlan_on) && (vfta_changed != NULL)) {
+				/* someone wants to clear the vfta entry
+				 * but some pools/VFs are still using it.
+				 * Ignore it. */
+				*vfta_changed = false;
+			}
+		} else
+			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+	u32 offset;
+
+	DEBUGFUNC("ixgbe_clear_vfta_generic");
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_generic - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+				 bool *link_up, bool link_up_wait_to_complete)
+{
+	u32 links_reg, links_orig;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_check_mac_link_generic");
+
+	/* clear the old state */
+	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+	if (links_orig != links_reg) {
+		DEBUGOUT2("LINKS changed from %08X to %08X\n",
+			  links_orig, links_reg);
+	}
+
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msec_delay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		if (hw->mac.type >= ixgbe_mac_X550) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+		}
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		if (hw->mac.type >= ixgbe_mac_X550) {
+			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+				*speed = IXGBE_LINK_SPEED_5GB_FULL;
+		}
+		break;
+	default:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function reads the alternative SAN MAC address block in the EEPROM
+ *  to check whether the alternative WWNN/WWPN prefixes are supported.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+				 u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+		goto wwn_prefix_err;
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, &caps))
+		goto wwn_prefix_err;
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", offset);
+	}
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+		goto wwn_prefix_err;
+
+wwn_prefix_out:
+	return IXGBE_SUCCESS;
+
+wwn_prefix_err:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", offset);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
+ *
+ *  This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+	u16 offset, caps, flags;
+	s32 status;
+
+	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
+
+	/* clear output first */
+	*bs = ixgbe_fcoe_bootstatus_unavailable;
+
+	/* check if FCOE IBA block is present */
+	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+	status = hw->eeprom.ops.read(hw, offset, &caps);
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+		goto out;
+
+	/* check if iSCSI FCOE block is populated */
+	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	if ((offset == 0) || (offset == 0xFFFF))
+		goto out;
+
+	/* read fcoe flags in iSCSI FCOE block */
+	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+	status = hw->eeprom.ops.read(hw, offset, &flags);
+	if (status != IXGBE_SUCCESS)
+		goto out;
+
+	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+		*bs = ixgbe_fcoe_bootstatus_enabled;
+	else
+		*bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for anti-spoofing
+ *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+	int j;
+	int pf_target_reg = pf >> 3;
+	int pf_target_shift = pf % 8;
+	u32 pfvfspoof = 0;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	if (enable)
+		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+	/*
+	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
+	 * MAC anti-spoof enables in each register array element.
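+	 * For example, pf = 10 selects register 1 (10 >> 3) and bit 2
+	 * (10 % 8) within it.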
+	 */
+	for (j = 0; j < pf_target_reg; j++)
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+	/*
+	 * The PF should be allowed to spoof so that it can support
+	 * emulation mode NICs.  Do not set the bits assigned to the PF
+	 */
+	pfvfspoof &= (1 << pf_target_shift) - 1;
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+	/*
+	 * Remaining pools belong to the PF so they do not need to have
+	 * anti-spoofing enabled.
+	 */
+	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+}
+
+/**
+ *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for VLAN anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+	u32 pfvfspoof;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+	if (enable)
+		pfvfspoof |= (1 << vf_target_shift);
+	else
+		pfvfspoof &= ~(1 << vf_target_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ *  ixgbe_get_device_caps_generic - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word with the extra device capabilities
+ *
+ *  This function will read the EEPROM location for the device capabilities,
+ *  and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+	DEBUGFUNC("ixgbe_get_device_caps_generic");
+
+	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
+ *  @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
+{
+	u32 regval;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
+
+	/* Enable relaxed ordering */
+	for (i = 0; i < hw->mac.max_tx_queues; i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+	}
+
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+	}
+
+}
+
+/**
+ *  ixgbe_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to the buffer to checksum
+ *  @length: number of bytes to include in the checksum
+ *
+ *  Calculates the checksum over the given buffer for the specified length
+ *  and returns it.
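+ *  The result is the two's complement of the byte sum, so summing the
+ *  buffer together with the stored checksum yields zero (mod 256).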
+ **/
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8 sum = 0;
+
+	DEBUGFUNC("ixgbe_calculate_checksum");
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
+
+/**
+ *  ixgbe_get_hi_status - Get host interface command status
+ *  @hw: pointer to the HW structure
+ *  @ret_status: pointer used to return the command status (may be NULL)
+ *
+ *  Check if command returned with success. On success return IXGBE_SUCCESS
+ *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 ixgbe_get_hi_status(struct ixgbe_hw *hw, u8 *ret_status)
+{
+	struct ixgbe_hic_hdr response;
+	u32 *response_val = (u32 *)&response;
+
+	DEBUGFUNC("ixgbe_get_hi_status");
+
+	/* Read the command response */
+	*response_val = IXGBE_CPU_TO_LE32(IXGBE_READ_REG(hw, IXGBE_FLEX_MNG));
+
+	if (ret_status)
+		*ret_status = response.cmd_or_resp.ret_status;
+
+	if (response.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) {
+		DEBUGOUT1("Host interface error=%x.\n",
+			  response.cmd_or_resp.ret_status);
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_host_interface_command - Issue command to manageability block
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains the command to write and where the return status will
+ *   be placed
+ *  @length: length of buffer, must be multiple of 4 bytes
+ *  @timeout: time in ms to wait for command completion
+ *  @return_data: read and return data from the buffer (true) or not (false)
+ *   Needed because FW structures are big endian and decoding of
+ *   these fields can be 8 bit or 16 bit based on command. Decoding
+ *   is not easily understood without making a table of commands.
+ *   So we will leave this up to the caller to read back the data
+ *   in these cases.
+ *
+ *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
+ *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout, bool return_data)
+{
+	u32 hicr, i, bi, fwsts;
+	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+	u16 buf_len;
+	u16 dword_len;
+
+	DEBUGFUNC("ixgbe_host_interface_command");
+
+	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+	/* Set bit 9 of FWSTS clearing FW reset indication */
+	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
+	/* Check that the host interface is enabled. */
+	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+	if ((hicr & IXGBE_HICR_EN) == 0) {
+		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Calculate length in DWORDs. We must be DWORD aligned */
+	if ((length % (sizeof(u32))) != 0) {
+		DEBUGOUT("Buffer length failure, not aligned to dword\n");
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	dword_len = length >> 2;
+
+	/* The device driver writes the relevant command block
+	 * into the ram area.
+	 */
+	for (i = 0; i < dword_len; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+				      i, IXGBE_CPU_TO_LE32(buffer[i]));
+
+	/* Setting this bit tells the ARC that a new command is pending. */
+	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+	for (i = 0; i < timeout; i++) {
+		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+		if (!(hicr & IXGBE_HICR_C))
+			break;
+		msec_delay(1);
+	}
+
+	/* Check command completion */
+	if ((timeout != 0 && i == timeout) ||
+	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
+		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+			     "Command has failed with no status valid.\n");
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	if (!return_data)
+		return 0;
+
+	/* Calculate length in DWORDs */
+	dword_len = hdr_size >> 2;
+
+	/* first pull in the header so we know the buffer length */
+	for (bi = 0; bi < dword_len; bi++) {
+		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		IXGBE_LE32_TO_CPUS(&buffer[bi]);
+	}
+
+	/* If there is any thing in data position pull it in */
+	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+	if (buf_len == 0)
+		return 0;
+
+	if (length < buf_len + hdr_size) {
+		DEBUGOUT("Buffer not large enough for reply message.\n");
+		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Calculate length in DWORDs, add 3 for odd lengths */
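+	/* Adding 3 before the shift rounds up to a whole number of DWORDs,
+	 * e.g. a 6-byte payload needs (6 + 3) >> 2 = 2 DWORDs.
+	 */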
+	dword_len = (buf_len + 3) >> 2;
+
+	/* Pull in the rest of the buffer (bi is where we left off) */
+	for (; bi <= dword_len; bi++) {
+		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		IXGBE_LE32_TO_CPUS(&buffer[bi]);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ *  @hw: pointer to the HW structure
+ *  @maj: driver version major number
+ *  @min: driver version minor number
+ *  @build: driver version build number
+ *  @sub: driver version sub build number
+ *
+ *  Sends driver version number to firmware through the manageability
+ *  block.  On success return IXGBE_SUCCESS
+ *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+				 u8 build, u8 sub)
+{
+	struct ixgbe_hic_drv_info fw_cmd;
+	int i;
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+	    != IXGBE_SUCCESS) {
+		ret_val = IXGBE_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+	fw_cmd.port_num = (u8)hw->bus.func;
+	fw_cmd.ver_maj = maj;
+	fw_cmd.ver_min = min;
+	fw_cmd.ver_build = build;
+	fw_cmd.ver_sub = sub;
+	fw_cmd.hdr.checksum = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+	fw_cmd.pad = 0;
+	fw_cmd.pad2 = 0;
+
+	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+						       sizeof(fw_cmd),
+						       IXGBE_HI_COMMAND_TIMEOUT,
+						       true);
+		if (ret_val != IXGBE_SUCCESS)
+			continue;
+
+		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+		    FW_CEM_RESP_STATUS_SUCCESS)
+			ret_val = IXGBE_SUCCESS;
+		else
+			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+		break;
+	}
+
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+	return ret_val;
+}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+			     int strategy)
+{
+	u32 pbsize = hw->mac.rx_pb_size;
+	int i = 0;
+	u32 rxpktsize, txpktsize, txpbthresh;
+
+	/* Reserve headroom */
+	pbsize -= headroom;
+
+	if (!num_pb)
+		num_pb = 1;
+
+	/* Divide remaining packet buffer space amongst the number of packet
+	 * buffers requested using supplied strategy.
+	 */
+	switch (strategy) {
+	case PBA_STRATEGY_WEIGHTED:
+		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
+		 * buffer with 5/8 of the packet buffer space.
+		 */
+		rxpktsize = (pbsize * 5) / (num_pb * 4);
+		pbsize -= rxpktsize * (num_pb / 2);
+		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+		for (; i < (num_pb / 2); i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+		/* Fall through to configure remaining packet buffers */
+	case PBA_STRATEGY_EQUAL:
+		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+		for (; i < num_pb; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+		break;
+	default:
+		break;
+	}
+
+	/* Only support an equally distributed Tx packet buffer strategy. */
+	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+	for (i = 0; i < num_pb; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+	}
+
+	/* Clear unused TCs, if any, to zero buffer size */
+	for (; i < IXGBE_MAX_PB; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+	}
+}
+
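A quick worked example of the weighted split, assuming the 512 KB Rx packet buffer of an 82599 with num_pb = 8 and no headroom: rxpktsize = (512 * 5) / (8 * 4) = 80 KB for each of the first four buffers, leaving 512 - 4 * 80 = 192 KB, which the fall-through equal stage divides as 192 / 4 = 48 KB for each of the remaining four. This matches the 80 KB / 48 KB layout that the PBA_STRATEGY_WEIGHTED comment in ixgbe_dcb.h describes.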
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs.  This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+	u32 gcr_ext, hlreg0, i, poll;
+	u16 value;
+
+	/*
+	 * If double reset is not requested then all transactions should
+	 * already be clear and as such there is no work to do
+	 */
+	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+		return;
+
+	/*
+	 * Set loopback enable to prevent any transmits from being sent
+	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
+	 * has already been cleared.
+	 */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+	/* Wait for a last completion before clearing buffers */
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(3);
+
+	/*
+	 * Before proceeding, make sure that the PCIe block does not have
+	 * transactions pending.
+	 */
+	poll = ixgbe_pcie_timeout_poll(hw);
+	for (i = 0; i < poll; i++) {
+		usec_delay(100);
+		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+		if (IXGBE_REMOVED(hw->hw_addr))
+			goto out;
+		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+			goto out;
+	}
+
+out:
+	/* initiate cleaning flow for buffers in the PCIe transaction layer */
+	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+	/* Flush all writes and allow 20usec for all transactions to clear */
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(20);
+
+	/* restore previous register values */
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
+
+STATIC const u8 ixgbe_emc_temp_data[4] = {
+	IXGBE_EMC_INTERNAL_DATA,
+	IXGBE_EMC_DIODE1_DATA,
+	IXGBE_EMC_DIODE2_DATA,
+	IXGBE_EMC_DIODE3_DATA
+};
+STATIC const u8 ixgbe_emc_therm_limit[4] = {
+	IXGBE_EMC_INTERNAL_THERM_LIMIT,
+	IXGBE_EMC_DIODE1_THERM_LIMIT,
+	IXGBE_EMC_DIODE2_THERM_LIMIT,
+	IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the thermal sensor data and stores it in
+ *  hw->mac.thermal_sensor_data.
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
+
+	/* Only support thermal sensors attached to 82599 physical port 0 */
+	if ((hw->mac.type != ixgbe_mac_82599EB) ||
+	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
+	if (status)
+		goto out;
+
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
+	if (status)
+		goto out;
+
+	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+		!= IXGBE_ETS_TYPE_EMC) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+					     &ets_sensor);
+		if (status)
+			goto out;
+
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0) {
+			status = hw->phy.ops.read_i2c_byte(hw,
+					ixgbe_emc_temp_data[sensor_index],
+					IXGBE_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+			if (status)
+				goto out;
+		}
+	}
+out:
+	return status;
+}
+
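A hedged usage sketch: the results are not returned directly but stored in hw->mac.thermal_sensor_data, so a caller reads them back from there afterwards (DEBUGOUT1 is used purely for illustration).

	struct ixgbe_thermal_sensor_data *tsd = &hw->mac.thermal_sensor_data;

	if (ixgbe_get_thermal_sensor_data_generic(hw) == IXGBE_SUCCESS)
		DEBUGOUT1("sensor 0 temperature: %d\n", tsd->sensor[0].temp);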
+/**
+ *  ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Inits the thermal sensor thresholds according to the NVM map
+ *  and saves the threshold and location values into mac.thermal_sensor_data.
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 offset;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  therm_limit;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
+
+	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+	/* Only support thermal sensors attached to 82599 physical port 0 */
+	if ((hw->mac.type != ixgbe_mac_82599EB) ||
+	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+		return IXGBE_NOT_IMPLEMENTED;
+
+	offset = IXGBE_ETS_CFG;
+	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
+		goto eeprom_err;
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return IXGBE_NOT_IMPLEMENTED;
+
+	offset = ets_offset;
+	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
+		goto eeprom_err;
+	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
+		!= IXGBE_ETS_TYPE_EMC)
+		return IXGBE_NOT_IMPLEMENTED;
+
+	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+
+	for (i = 0; i < num_sensors; i++) {
+		offset = ets_offset + 1 + i;
+		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
+			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+				      "eeprom read at offset %d failed",
+				      offset);
+			continue;
+		}
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+		hw->phy.ops.write_i2c_byte(hw,
+			ixgbe_emc_therm_limit[sensor_index],
+			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
+			data->sensor[i].location = sensor_location;
+			data->sensor[i].caution_thresh = therm_limit;
+			data->sensor[i].max_op_thresh = therm_limit -
+							low_thresh_delta;
+		}
+	}
+	return status;
+
+eeprom_err:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", offset);
+	return IXGBE_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
+{
+	u32 reg, i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		map[i] = IXGBE_RTRUP2TC_UP_MASK &
+			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+	return;
+}
+
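A small usage sketch; the decode is just the mask-and-shift above, so with an identity UP-to-TC mapping programmed in hardware the output array would simply read map[up] == up.

	u8 map[IXGBE_DCB_MAX_USER_PRIORITY];

	ixgbe_dcb_get_rtrup2tc_generic(hw, map);
	/* map[up] now holds the traffic class assigned to user priority up */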
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+	u32 rxctrl;
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (rxctrl & IXGBE_RXCTRL_RXEN) {
+		if (hw->mac.type != ixgbe_mac_82598EB) {
+			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+				hw->mac.set_lben = true;
+			} else {
+				hw->mac.set_lben = false;
+			}
+		}
+		rxctrl &= ~IXGBE_RXCTRL_RXEN;
+		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+	}
+}
+
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+	u32 rxctrl;
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		if (hw->mac.set_lben) {
+			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+			hw->mac.set_lben = false;
+		}
+	}
+}
+
+/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return false;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+	fwsm &= IXGBE_FWSM_MODE_MASK;
+	return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
+
+/**
+ * ixgbe_mng_enabled - Is the manageability engine enabled?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the manageability engine is enabled.
+ **/
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+	u32 fwsm, manc, factps;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+		return false;
+
+	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+		return false;
+
+	if (hw->mac.type <= ixgbe_mac_X540) {
+		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+		if (factps & IXGBE_FACTPS_MNGCG)
+			return false;
+	}
+
+	return true;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
new file mode 100644
index 0000000..9ebdd45
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -0,0 +1,183 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_WRITE_REG64(hw, reg, value) \
+	do { \
+		IXGBE_WRITE_REG(hw, reg, (u32) value); \
+		IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
+	} while (0)
+#define IXGBE_REMOVED(a) (0)
+struct ixgbe_pba {
+	u16 word[2];
+	u16 *pba_block;
+};
+
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
+
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+				  u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+		       u32 eeprom_buf_size, u16 max_pba_block_size,
+		       struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			     u32 eeprom_buf_size, u16 *pba_block_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+					       u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+				   u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+				    u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+				       u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+					      u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+					   u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+			  u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				      u32 mc_addr_count,
+				      ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+				      u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+			 u32 vind, bool vlan_on);
+s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			   bool vlan_on, bool *vfta_changed);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+			       ixgbe_link_speed *speed,
+			       bool *link_up, bool link_up_wait_to_complete);
+
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+				 u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+			     int strategy);
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+				 u8 build, u8 sub);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_get_hi_status(struct ixgbe_hw *hw, u8 *ret_status);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout, bool return_data);
+
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+#define IXGBE_EMC_DIODE3_DATA		0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT	0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
+void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.c b/drivers/net/ixgbe/base/ixgbe_dcb.c
new file mode 100644
index 0000000..60c3b4e
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb.c
@@ -0,0 +1,714 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - Calculate IEEE traffic class credits
+ *
+ * Calculates the traffic class credits from the configured bandwidth
+ * percentages. Credits are the smallest unit programmable into the
+ * underlying hardware. The IEEE 802.1Qaz specification does not use
+ * bandwidth groups, so this is much simpler than the CEE case.
+ */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+				   int max_frame_size)
+{
+	int min_percent = 100;
+	int min_credit, multiplier;
+	int i;
+
+	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+			IXGBE_DCB_CREDIT_QUANTUM;
+
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if (bw[i] < min_percent && bw[i])
+			min_percent = bw[i];
+	}
+
+	multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the hw credits for each TC */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
+
+		if (val < min_credit)
+			val = min_credit;
+		refill[i] = (u16)val;
+
+		max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
+	}
+
+	return 0;
+}
+
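A worked example, using only the code above and the defines from ixgbe_dcb.h in this patch: with two traffic classes at 50% each and a 1518-byte max frame, min_credit = (759 + 63) / 64 = 12, min_percent = 50 and multiplier = 12 / 50 + 1 = 1, so each active class gets refill = 50 and max = (50 * 400) / 100 = 200, while the unused classes fall back to the 12-credit minimum for both refill and max.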
+/**
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings.
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx.
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config_cee().
+ */
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
+				   struct ixgbe_dcb_config *dcb_config,
+				   u32 max_frame_size, u8 direction)
+{
+	struct ixgbe_dcb_tc_path *p;
+	u32 min_multiplier	= 0;
+	u16 min_percent		= 100;
+	s32 ret_val =		IXGBE_SUCCESS;
+	/* Initialization values default for Tx settings */
+	u32 min_credit		= 0;
+	u32 credit_refill	= 0;
+	u32 credit_max		= 0;
+	u16 link_percentage	= 0;
+	u8  bw_percent		= 0;
+	u8  i;
+
+	if (dcb_config == NULL) {
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+	}
+
+	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+		     IXGBE_DCB_CREDIT_QUANTUM;
+
+	/* Find smallest link percentage */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+		link_percentage = p->bwg_percent;
+
+		link_percentage = (link_percentage * bw_percent) / 100;
+
+		if (link_percentage && link_percentage < min_percent)
+			min_percent = link_percentage;
+	}
+
+	/*
+	 * The ratio between traffic classes will control the bandwidth
+	 * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must be larger than the max
+	 * frame size, so here we find the smallest multiplier that keeps
+	 * every class's credits above the max frame size.
+	 */
+	min_multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the link percentage for each TC first */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+
+		link_percentage = p->bwg_percent;
+		/* Must be careful of integer division for very small nums */
+		link_percentage = (link_percentage * bw_percent) / 100;
+		if (p->bwg_percent > 0 && link_percentage == 0)
+			link_percentage = 1;
+
+		/* Save link_percentage for reference */
+		p->link_percent = (u8)link_percentage;
+
+		/* Calculate credit refill ratio using multiplier */
+		credit_refill = min(link_percentage * min_multiplier,
+				    (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
+		p->data_credits_refill = (u16)credit_refill;
+
+		/* Calculate maximum credit for the TC */
+		credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
+
+		/*
+		 * Adjustment based on rule checking, if the percentage
+		 * of a TC is too small, the maximum credit may not be
+		 * enough to send out a jumbo frame in data plane arbitration.
+		 */
+		if (credit_max && (credit_max < min_credit))
+			credit_max = min_credit;
+
+		if (direction == IXGBE_DCB_TX_CONFIG) {
+			/*
+			 * Adjustment based on rule checking, if the
+			 * percentage of a TC is too small, the maximum
+			 * credit may not be enough to send out a TSO
+			 * packet in descriptor plane arbitration.
+			 */
+			if (credit_max && (credit_max <
+			    IXGBE_DCB_MIN_TSO_CREDIT)
+			    && (hw->mac.type == ixgbe_mac_82598EB))
+				credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
+
+			dcb_config->tc_config[i].desc_credits_max =
+								(u16)credit_max;
+		}
+
+		p->data_credits_max = (u16)credit_max;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info which is stored per
+ * traffic class into a 8bit user priority bitmask that can be
+ * consumed by hardware routines. The priority-to-TC map must be
+ * updated before calling this routine so that the current UP-to-TC
+ * mapping is used.
+ */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int up;
+
+	/*
+	 * If the TC for this user priority has PFC enabled then set the
+	 * matching bit in 'pfc_up' to reflect that PFC is enabled.
+	 */
+	for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
+		if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
+			*pfc_up |= 1 << up;
+	}
+}
+
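A minimal sketch of the unpack, assuming cfg is an already populated struct ixgbe_dcb_config in which TC 0 has PFC disabled and TC 1 has it enabled; with UPs 0-3 on TC 0 and UPs 4-7 on TC 1 the resulting bitmask is 0xF0.

	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	u8 pfc_up;

	ixgbe_dcb_unpack_pfc_cee(cfg, map, &pfc_up);
	/* pfc_up == 0xF0: one bit per user priority whose TC has PFC on */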
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
+			     u16 *refill)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
+			    u8 *bwgid)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
+			   u8 *tsa)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	u8 prio_mask = 1 << up;
+	u8 tc = cfg->num_tcs.pg_tcs;
+
+	/* If tc is 0 then DCB is likely not enabled or supported */
+	if (!tc)
+		goto out;
+
+	/*
+	 * Test from maximum TC to 1 and report the first match we find.  If
+	 * we find no match we can assume that the TC is 0 since the TC must
+	 * be set for all user priorities
+	 */
+	for (tc--; tc; tc--) {
+		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+			break;
+	}
+out:
+	return tc;
+}
+
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
+			      u8 *map)
+{
+	u8 up;
+
+	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
+		map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
+/**
+ * ixgbe_dcb_check_config_cee - Check DCB rules for DCB settings
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
+ *    Group must total 100.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ *    and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ *    with zero bandwidth.
+ */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
+{
+	struct ixgbe_dcb_tc_path *p;
+	s32 ret_val = IXGBE_SUCCESS;
+	u8 i, j, bw = 0, bw_id;
+	u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
+	bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
+
+	memset(bw_sum, 0, sizeof(bw_sum));
+	memset(link_strict, 0, sizeof(link_strict));
+
+	/* First Tx, then Rx */
+	for (i = 0; i < 2; i++) {
+		/* Check each traffic class for rule violation */
+		for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+			p = &dcb_config->tc_config[j].path[i];
+
+			bw = p->bwg_percent;
+			bw_id = p->bwg_id;
+
+			if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+			if (p->tsa == ixgbe_dcb_tsa_strict) {
+				link_strict[i][bw_id] = true;
+				/* Link strict should have zero bandwidth */
+				if (bw) {
+					ret_val = IXGBE_ERR_CONFIG;
+					goto err_config;
+				}
+			} else if (!bw) {
+				/*
+				 * Traffic classes without link strict
+				 * should have non-zero bandwidth.
+				 */
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+			bw_sum[i][bw_id] += bw;
+		}
+
+		bw = 0;
+
+		/* Check each bandwidth group for rule violation */
+		for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
+			bw += dcb_config->bw_percentage[i][j];
+			/*
+			 * Sum of bandwidth percentages of all traffic classes
+			 * within a Bandwidth Group must total 100 except for
+			 * link strict group (zero bandwidth).
+			 */
+			if (link_strict[i][j]) {
+				if (bw_sum[i][j]) {
+					/*
+					 * Link strict group should have zero
+					 * bandwidth.
+					 */
+					ret_val = IXGBE_ERR_CONFIG;
+					goto err_config;
+				}
+			} else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
+				   bw_sum[i][j] != 0) {
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+		}
+
+		if (bw != IXGBE_DCB_BW_PERCENT) {
+			ret_val = IXGBE_ERR_CONFIG;
+			goto err_config;
+		}
+	}
+
+err_config:
+	DEBUGOUT2("DCB error code %d while checking %s settings.\n",
+		  ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to gather statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+			   u8 tc_count)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to gather statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+			    u8 tc_count)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
+				struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY]	= { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
+							tsa, map);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
+				     struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+							     bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+							     bwgid, tsa);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
+				     struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+							     bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+							     bwgid, tsa,
+							     map);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
+			 struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 pfc_en;
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+	ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tc_stats_82598(hw);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
+			struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 pfc_en;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	/* Unpack CEE standard containers */
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
+						refill, max, bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ixgbe_dcb_config_82599(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
+						refill, max, bwgid,
+						tsa, map);
+
+		ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+		break;
+	default:
+		break;
+	}
+
+	if (!ret && dcb_config->pfc_mode_enable) {
+		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	int ret = IXGBE_ERR_PARAM;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+			    u8 *bwg_id, u8 *tsa, u8 *map)
+{
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+						       tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+						       tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+						  tsa, map);
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+						       tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+						       tsa, map);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.h b/drivers/net/ixgbe/base/ixgbe_dcb.h
new file mode 100644
index 0000000..47c593f
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb.h
@@ -0,0 +1,174 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
+#include "ixgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM	64
+#define IXGBE_DCB_MAX_CREDIT_REFILL	200   /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE		(32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT		(2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT	\
+	((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY	8
+#define IXGBE_DCB_MAX_BW_GROUP		8
+#define IXGBE_DCB_BW_PERCENT		100
+
+#define IXGBE_DCB_TX_CONFIG		0
+#define IXGBE_DCB_RX_CONFIG		1
+
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT	0x00000001
+#define IXGBE_DCB_PFC_SUPPORT	0x00000002
+#define IXGBE_DCB_BCN_SUPPORT	0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT	0x00000008
+#define IXGBE_DCB_GSP_SUPPORT	0x00000010
+
+struct ixgbe_dcb_support {
+	u32 capabilities; /* DCB capabilities */
+
+	/* Each bit represents a number of TCs configurable in the hw.
+	 * If 8 traffic classes can be configured, the value is 0x80. */
+	u8 traffic_classes;
+	u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+	ixgbe_dcb_tsa_ets = 0,
+	ixgbe_dcb_tsa_group_strict_cee,
+	ixgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct ixgbe_dcb_tc_path {
+	u8 bwg_id; /* Bandwidth Group (BWG) ID */
+	u8 bwg_percent; /* % of BWG's bandwidth */
+	u8 link_percent; /* % of link bandwidth */
+	u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+	u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+	u16 data_credits_max; /* Max credits for a configured packet buffer
+			       * in 64B granularity.*/
+	enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum ixgbe_dcb_pfc {
+	ixgbe_dcb_pfc_disabled = 0,
+	ixgbe_dcb_pfc_enabled,
+	ixgbe_dcb_pfc_enabled_txonly,
+	ixgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct ixgbe_dcb_tc_config {
+	struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+	enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+	u16 desc_credits_max; /* For Tx Descriptor arbitration */
+	u8 tc; /* Traffic class (TC) */
+};
+
+enum ixgbe_dcb_pba {
+	/* PBA[0-7] each use 64KB FIFO */
+	ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+	/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+	ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
+	u8 pg_tcs;
+	u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+	struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	struct ixgbe_dcb_support support;
+	struct ixgbe_dcb_num_tcs num_tcs;
+	u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+	bool pfc_mode_enable;
+	bool round_robin_enable;
+
+	enum ixgbe_dcb_pba rx_pba_cfg;
+
+	u32 dcb_cfg_version; /* Not used...OS-specific? */
+	u32 link_speed; /* For bandwidth allocation validation purpose */
+	bool vt_mode;
+};
+
+/* DCB driver APIs */
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+				       struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+					 struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+					 struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+				    struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
new file mode 100644
index 0000000..08d44d4
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
@@ -0,0 +1,360 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to gather statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_stats *stats,
+				 u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_tc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	/* Statistics pertaining to each traffic class */
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Transmitted Packets */
+		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+		/* Transmitted Bytes */
+		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
+		/* Received Packets */
+		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+		/* Received Bytes */
+		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
+
+#if 0
+		/* Can we get rid of these??  Consequently, getting rid
+		 * of the tc_stats structure.
+		 */
+		tc_stats_array[up]->in_overflow_discards = 0;
+		tc_stats_array[up]->out_overflow_discards = 0;
+#endif
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to gather statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_stats *stats,
+				  u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_pfc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Priority XOFF Transmitted */
+		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+		/* Priority XOFF Received */
+		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits, indexed by traffic class
+ * @max: max credits, indexed by traffic class
+ * @tsa: transmission selection algorithm, indexed by traffic class
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+				      u16 *max, u8 *tsa)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8 i = 0;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
+	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	/* Enable Arbiter */
+	reg &= ~IXGBE_RMCS_ARBDIS;
+	/* Enable Receive Recycle within the BWG */
+	reg |= IXGBE_RMCS_RRM;
+	/* Enable Deficit Fixed Priority arbitration*/
+	reg |= IXGBE_RMCS_DFP;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+
+		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RT2CR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
+	}
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
+	reg |= IXGBE_RDRXCTL_MPBEN;
+	reg |= IXGBE_RDRXCTL_MCEN;
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	/* Make sure there are enough descriptors before arbitration */
+	reg &= ~IXGBE_RXCTRL_DMBYPS;
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits, indexed by traffic class
+ * @max: max credits, indexed by traffic class
+ * @bwg_id: bandwidth group ID, indexed by traffic class
+ * @tsa: transmission selection algorithm, indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+					   u16 *refill, u16 *max, u8 *bwg_id,
+					   u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8 i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+
+	/* Enable arbiter */
+	reg &= ~IXGBE_DPMCS_ARBDIS;
+	reg |= IXGBE_DPMCS_TSOEF;
+
+	/* Configure Max TSO packet size 34KB including payload and headers */
+	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_TDTQ2TCCR_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_TDTQ2TCCR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits, indexed by traffic class
+ * @max: max credits, indexed by traffic class
+ * @bwg_id: bandwidth group ID, indexed by traffic class
+ * @tsa: transmission selection algorithm, indexed by traffic class
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+					   u16 *refill, u16 *max, u8 *bwg_id,
+					   u8 *tsa)
+{
+	u32 reg;
+	u8 i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+	/* Enable Data Plane Arbiter */
+	reg &= ~IXGBE_PDPMCS_ARBDIS;
+	/* Enable DFP and Transmit Recycle Mode */
+	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_TDPT2TCCR_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_TDPT2TCCR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
+	}
+
+	/* Enable Tx packet buffer division */
+	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+	reg |= IXGBE_DTXCTL_ENDBUBD;
+	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82598 - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: bitmask of user priorities with priority flow control enabled
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+{
+	u32 fcrtl, reg;
+	u8 i;
+
+	/* Enable Transmit Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	reg &= ~IXGBE_RMCS_TFCE_802_3X;
+	reg |= IXGBE_RMCS_TFCE_PRIORITY;
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+
+	if (pfc_en)
+		reg |= IXGBE_FCTRL_RPFCE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if (!(pfc_en & (1 << i))) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+			continue;
+		}
+
+		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+	}
+
+	/* Configure pause time */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+{
+	u32 reg = 0;
+	u8 i = 0;
+	u8 j = 0;
+
+	/* Receive Queues stats setting -  8 queues per statistics reg */
+	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
+		reg |= ((0x1010101) * j);
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
+		reg |= ((0x1010101) * j);
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
+	}
+	/* Transmit Queues stats setting - 4 queues per statistics reg */
+	for (i = 0; i < 8; i++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
+		reg |= ((0x1010101) * i);
+		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82598 - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused; kept for interface compatibility
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
+			      u16 *refill, u16 *max, u8 *bwg_id,
+			      u8 *tsa)
+{
+	UNREFERENCED_1PARAMETER(link_speed);
+
+	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tc_stats_82598(hw);
+
+	return IXGBE_SUCCESS;
+}
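
For orientation, here is a minimal caller-side sketch of the 82598 DCB path added above. It is not part of the patch; the credit values are illustrative only, and IXGBE_DCB_MAX_TRAFFIC_CLASS plus the ixgbe_dcb_tsa_* enumerators are assumed to come from ixgbe_dcb.h, which this file includes.

/* Hypothetical example: equal credits for all TCs, with the highest TC
 * given link strict priority.  Values are illustrative, not recommended. */
static void example_dcb_setup_82598(struct ixgbe_hw *hw)
{
	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
	u8 bwg_id[IXGBE_DCB_MAX_TRAFFIC_CLASS];
	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
	u8 i;

	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		refill[i] = 64;		/* credit refill quantum */
		max[i] = 512;		/* max credit limit */
		bwg_id[i] = 0;		/* single bandwidth group */
		tsa[i] = ixgbe_dcb_tsa_group_strict_cee;
	}
	tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS - 1] = ixgbe_dcb_tsa_strict;

	/* link_speed is ignored by the 82598 implementation */
	ixgbe_dcb_hw_config_82598(hw, 0, refill, max, bwg_id, tsa);
}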
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.h b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
new file mode 100644
index 0000000..06ffaa4
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.h
@@ -0,0 +1,99 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82598_H_
+#define _IXGBE_DCB_82598_H_
+
+/* DCB register definitions */
+
+#define IXGBE_DPMCS_MTSOS_SHIFT	16
+#define IXGBE_DPMCS_TDPAC	0x00000001 /* 0 Round Robin,
+					    * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM		0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS	0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF	0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA	0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP		0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
+					    * buffers enable */
+#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
+					    * (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT	12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT	9
+#define IXGBE_TDTQ2TCCR_GSP	0x40000000
+#define IXGBE_TDTQ2TCCR_LSP	0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT	12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT	9
+#define IXGBE_TDPT2TCCR_GSP	0x40000000
+#define IXGBE_TDPT2TCCR_LSP	0x80000000
+
+#define IXGBE_PDPMCS_TPPAC	0x00000020 /* 0 Round Robin,
+					    * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS	0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM	0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD	0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB	0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB	0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB	0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB	0x00014000 /* 80KB Packet Buffer */
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
+				 struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
+				  struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
+#endif /* _IXGBE_DCB_82598_H_ */
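
As a quick illustration of how the Tx descriptor arbiter fields defined above are packed, a sketch only, mirroring the loop in ixgbe_dcb_config_tx_desc_arbiter_82598(); the field widths are implied by the shift values, and the ixgbe_dcb_tsa_* enumerators are assumed from ixgbe_dcb.h.

/* Pack refill, max-credit and BWG index into a TDTQ2TCCR value the same
 * way the arbiter routine does.  Illustrative only. */
static inline u32 example_pack_tdtq2tccr(u16 refill, u16 max_credits,
					  u8 bwg_id, u8 tsa)
{
	u32 reg = (u32)max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;

	reg |= refill;					 /* refill quantum */
	reg |= (u32)bwg_id << IXGBE_TDTQ2TCCR_BWG_SHIFT; /* bandwidth group */
	if (tsa == ixgbe_dcb_tsa_group_strict_cee)
		reg |= IXGBE_TDTQ2TCCR_GSP;		 /* group strict */
	if (tsa == ixgbe_dcb_tsa_strict)
		reg |= IXGBE_TDTQ2TCCR_LSP;		 /* link strict */
	return reg;
}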
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
new file mode 100644
index 0000000..9778164
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
@@ -0,0 +1,593 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes in use.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_stats *stats,
+				 u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_tc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	/* Statistics pertaining to each traffic class */
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Transmitted Packets */
+		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+		/* Transmitted Bytes (read low first to prevent missed carry) */
+		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
+		stats->qbtc[tc] +=
+			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
+		/* Received Packets */
+		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+		/* Received Bytes (read low first to prevent missed carry) */
+		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
+		stats->qbrc[tc] +=
+			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
+
+		/* Received Dropped Packet */
+		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes in use.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_stats *stats,
+				  u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_pfc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Priority XOFF Transmitted */
+		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+		/* Priority XOFF Received */
+		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+				      u16 *max, u8 *bwg_id, u8 *tsa,
+				      u8 *map)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8  i = 0;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTRPT4C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
+	}
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8  i;
+
+	/* Clear the per-Tx queue credits; we use per-TC instead */
+	for (i = 0; i < 128; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
+	}
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_RTTDT2C_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTTDT2C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
+	}
+
+	/*
+	 * Configure Tx descriptor plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa,
+					   u8 *map)
+{
+	u32 reg;
+	u8 i;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; SP; arb delay)
+	 */
+	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
+	      IXGBE_RTTPCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_RTTPT2C_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTTPT2C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
+	}
+
+	/*
+	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+
+	/* Enable Transmit Priority Flow Control */
+	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	reg |= IXGBE_MFLCN_DPF;
+
+	/*
+	 * X540 supports per TC Rx priority flow control.  So
+	 * clear all TCs and only enable those that should be
+	 * enabled.
+	 */
+	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+	if (hw->mac.type >= ixgbe_mac_X540)
+		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+	if (pfc_en)
+		reg |= IXGBE_MFLCN_RPFCE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
+		if (map[i] > max_tc)
+			max_tc = map[i];
+	}
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
+			if ((map[j] == i) && (pfc_en & (1 << j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+		} else {
+			/*
+			 * In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the Rx packet buffer size - 24KB.  This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
+			 */
+			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+	}
+
+	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
+				    struct ixgbe_dcb_config *dcb_config)
+{
+	u32 reg = 0;
+	u8  i   = 0;
+	u8 tc_count = 8;
+	bool vt_mode = false;
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vt_mode = dcb_config->vt_mode;
+	}
+
+	if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+		return IXGBE_ERR_PARAM;
+
+	if (tc_count == 8 && vt_mode == false) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Set all 16 queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++) {
+			reg = 0x01010101 * (i / 4);
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		}
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Set all queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 * Tx queues are allocated non-uniformly to TCs:
+		 * 32, 32, 16, 16, 8, 8, 8, 8.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i < 8)
+				reg = 0x00000000;
+			else if (i < 16)
+				reg = 0x01010101;
+			else if (i < 20)
+				reg = 0x02020202;
+			else if (i < 24)
+				reg = 0x03030303;
+			else if (i < 26)
+				reg = 0x04040404;
+			else if (i < 28)
+				reg = 0x05050505;
+			else if (i < 30)
+				reg = 0x06060606;
+			else
+				reg = 0x07070707;
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+		}
+	} else if (tc_count == 4 && vt_mode == false) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Set all 16 queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i % 8 > 3)
+				/* In 4 TC mode, odd 16-queue ranges are
+				 * not used.
+				 */
+				continue;
+			reg = 0x01010101 * (i / 8);
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		}
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Set all queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 * Tx queues are allocated non-uniformly to TCs:
+		 * 64, 32, 16, 16.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i < 16)
+				reg = 0x00000000;
+			else if (i < 24)
+				reg = 0x01010101;
+			else if (i < 28)
+				reg = 0x02020202;
+			else
+				reg = 0x03030303;
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+		}
+	} else if (tc_count == 4 && vt_mode == true) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+		 * pool. Set all 32 queues of each TC across pools to the same
+		 * stat with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+		 * pool. Set all 32 queues of each TC across pools to the same
+		 * stat with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_82599 - Configure general DCB parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure general DCB parameters.
+ */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
+			   struct ixgbe_dcb_config *dcb_config)
+{
+	u32 reg;
+	u32 q;
+
+	/* Disable the Tx desc arbiter so that MTQC can be changed */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg |= IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	if (dcb_config->num_tcs.pg_tcs == 8) {
+		/* Enable DCB for Rx with 8 TCs */
+		switch (reg & IXGBE_MRQC_MRQE_MASK) {
+		case 0:
+		case IXGBE_MRQC_RT4TCEN:
+			/* RSS disabled cases */
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RT8TCEN;
+			break;
+		case IXGBE_MRQC_RSSEN:
+		case IXGBE_MRQC_RTRSS4TCEN:
+			/* RSS enabled cases */
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RTRSS8TCEN;
+			break;
+		default:
+			/*
+			 * Unsupported value, assume stale data,
+			 * overwrite no RSS
+			 */
+			ASSERT(0);
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RT8TCEN;
+		}
+	}
+	if (dcb_config->num_tcs.pg_tcs == 4) {
+		/* We support both VT-on and VT-off with 4 TCs. */
+		if (dcb_config->vt_mode)
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_VMDQRT4TCEN;
+		else
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RTRSS4TCEN;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+	/* Enable DCB for Tx with 8 TCs */
+	if (dcb_config->num_tcs.pg_tcs == 8)
+		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+	else {
+		/* We support both VT-on and VT-off with 4 TCs. */
+		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		if (dcb_config->vt_mode)
+			reg |= IXGBE_MTQC_VT_ENA;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < 128; q++)
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+	/* Enable the Tx desc arbiter */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg &= ~IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	/* Enable Security TX Buffer IFG for DCB */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg |= IXGBE_SECTX_DCB;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused; kept for interface compatibility
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
+			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+			      u8 *map)
+{
+	UNREFERENCED_1PARAMETER(link_speed);
+
+	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
+					  map);
+	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+					       tsa, map);
+
+	return IXGBE_SUCCESS;
+}
+
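To show how the 82599 pieces above fit together, a minimal sketch (not part of the patch). The priority-to-TC map and the pfc_en bitmask are hypothetical examples; the credit arrays are supplied by the caller, and the MRQC/MTQC setup done by ixgbe_dcb_config_82599() plus the statistics mapping are assumed to be handled elsewhere.

/* Hypothetical 4-TC layout: priorities 0-1 -> TC0, 2-3 -> TC1, 4-5 -> TC2,
 * 6-7 -> TC3, with PFC enabled only for priorities 6 and 7. */
static void example_dcb_setup_82599(struct ixgbe_hw *hw, u16 *refill,
				    u16 *max, u8 *bwg_id, u8 *tsa)
{
	u8 map[IXGBE_DCB_MAX_USER_PRIORITY];
	u8 pfc_en = (1 << 6) | (1 << 7);
	u8 up;

	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
		map[up] = up / 2;	/* user priority -> traffic class */

	/* program the Rx packet, Tx descriptor and Tx data arbiters */
	ixgbe_dcb_hw_config_82599(hw, 0, refill, max, bwg_id, tsa, map);

	/* then enable priority flow control for the chosen priorities */
	ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
}
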
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.h b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
new file mode 100644
index 0000000..2e18350
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.h
@@ -0,0 +1,153 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_DCB_82599_H_
+#define _IXGBE_DCB_82599_H_
+
+/* DCB register definitions */
+#define IXGBE_RTTDCS_TDPAC	0x00000001 /* 0 Round Robin,
+					    * 1 WSP - Weighted Strict Priority
+					    */
+#define IXGBE_RTTDCS_VMPAC	0x00000002 /* 0 Round Robin,
+					    * 1 WRR - Weighted Round Robin
+					    */
+#define IXGBE_RTTDCS_TDRM	0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_BDPM	0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM	0x00800000 /* Bypass PB Free Space - must
+					     * clear!
+					     */
+#define IXGBE_RTTDCS_SPEED_CHG	0x80000000 /* Link speed change */
+
+/* Receive UP2TC mapping */
+#define IXGBE_RTRUP2TC_UP_SHIFT	3
+#define IXGBE_RTRUP2TC_UP_MASK	7
+/* Transmit UP2TC mapping */
+#define IXGBE_RTTUP2TC_UP_SHIFT	3
+
+#define IXGBE_RTRPT4C_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT	9  /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP	0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP	0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
+					    * buffers enable
+					    */
+#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
+					    * (RSS) enable
+					    */
+
+/* RTRPCS Bit Masks */
+#define IXGBE_RTRPCS_RRM	0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RTRPCS_RAC	0x00000004
+#define IXGBE_RTRPCS_ARBDIS	0x00000040 /* Arbitration disable bit */
+
+/* RTTDT2C Bit Masks */
+#define IXGBE_RTTDT2C_MCL_SHIFT	12
+#define IXGBE_RTTDT2C_BWG_SHIFT	9
+#define IXGBE_RTTDT2C_GSP	0x40000000
+#define IXGBE_RTTDT2C_LSP	0x80000000
+
+#define IXGBE_RTTPT2C_MCL_SHIFT	12
+#define IXGBE_RTTPT2C_BWG_SHIFT	9
+#define IXGBE_RTTPT2C_GSP	0x40000000
+#define IXGBE_RTTPT2C_LSP	0x80000000
+
+/* RTTPCS Bit Masks */
+#define IXGBE_RTTPCS_TPPAC	0x00000020 /* 0 Round Robin,
+					    * 1 SP - Strict Priority
+					    */
+#define IXGBE_RTTPCS_ARBDIS	0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM	0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT	22
+#define IXGBE_RTTPCS_ARBD_DCB	0x4 /* Arbitration delay in DCB mode */
+
+#define IXGBE_TXPBTHRESH_DCB	0xA /* THRESH value for DCB mode */
+
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer SEC IFG */
+
+/* BCN register definitions */
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
+#define IXGBE_RTTBCNRC_RS_ENA		0x80000000
+
+#define IXGBE_RTTBCNCR_MNG_CMTGI	0x00000001
+#define IXGBE_RTTBCNCR_MGN_BCNA_MODE	0x00000002
+#define IXGBE_RTTBCNCR_RSV7_11_SHIFT	5
+#define IXGBE_RTTBCNCR_G		0x00000400
+#define IXGBE_RTTBCNCR_I		0x00000800
+#define IXGBE_RTTBCNCR_H		0x00001000
+#define IXGBE_RTTBCNCR_VER_SHIFT	14
+#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT	16
+
+#define IXGBE_RTTBCNACL_SMAC_L_SHIFT	16
+
+#define IXGBE_RTTBCNTG_BCNA_MODE	0x80000000
+
+#define IXGBE_RTTBCNRTT_TS_SHIFT	3
+#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT	16
+
+#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL	0x00000002
+#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT	2
+#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT	16
+#define IXGBE_RTTBCNRD_DRIFT_ENA	0x80000000
+
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
+				    struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
+				 struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
+				  struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
+				      u8 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
+			   struct ixgbe_dcb_config *);
+
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
+			      u8 *, u8 *);
+#endif /* _IXGBE_DCB_82599_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c b/drivers/net/ixgbe/base/ixgbe_mbx.c
new file mode 100644
index 0000000..c00c2f7
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -0,0 +1,789 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
+
+/**
+ *  ixgbe_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_read_mbx");
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_write_mbx");
+
+	if (size > mbx->size) {
+		ret_val = IXGBE_ERR_MBX;
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "Invalid mailbox message size %d", size);
+	} else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_check_for_msg");
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_check_for_ack");
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_check_for_rst");
+
+	if (mbx->ops.check_for_rst)
+		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	DEBUGFUNC("ixgbe_poll_for_msg");
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		usec_delay(mbx->usec_delay);
+	}
+
+	if (countdown == 0)
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			   "Polling for VF%d mailbox message timed out", mbx_id);
+
+out:
+	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	DEBUGFUNC("ixgbe_poll_for_ack");
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		usec_delay(mbx->usec_delay);
+	}
+
+	if (countdown == 0)
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			     "Polling for VF%d mailbox ack timed out", mbx_id);
+
+out:
+	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_read_posted_mbx");
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+	/* if a message notification arrived, read it; otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			   u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_write_posted_mbx");
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->ops.read_posted = ixgbe_read_posted_mbx;
+	mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ *  ixgbe_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.
+ **/
+STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+	u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+	v2p_mailbox |= hw->mbx.v2p_mailbox;
+	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+	return v2p_mailbox;
+}
+
+/**
+ *  ixgbe_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  This function is used to check for the read to clear bits within
+ *  the V2P mailbox.
+ **/
+STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (v2p_mailbox & mask)
+		ret_val = IXGBE_SUCCESS;
+
+	hw->mbx.v2p_mailbox &= ~mask;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	UNREFERENCED_1PARAMETER(mbx_id);
+	DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+		ret_val = IXGBE_SUCCESS;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	UNREFERENCED_1PARAMETER(mbx_id);
+	DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+		ret_val = IXGBE_SUCCESS;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the PF has set a reset indication bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	UNREFERENCED_1PARAMETER(mbx_id);
+	DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+	if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+	    IXGBE_VFMAILBOX_RSTI))) {
+		ret_val = IXGBE_SUCCESS;
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+	/* Take ownership of the buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+	/* reserve mailbox for vf use */
+	if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+		ret_val = IXGBE_SUCCESS;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			      u16 mbx_id)
+{
+	s32 ret_val;
+	u16 i;
+
+	UNREFERENCED_1PARAMETER(mbx_id);
+
+	DEBUGFUNC("ixgbe_write_mbx_vf");
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbe_check_for_msg_vf(hw, 0);
+	ixgbe_check_for_ack_vf(hw, 0);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+	/*
+	 * Complete the remaining mailbox data registers with zero to reset
+	 * the data sent in a previous exchange (in either side) with the PF,
+	 * including exchanges performed by another Guest OS to which that VF
+	 * was previously assigned.
+	 */
+	while (i < hw->mbx.size) {
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
+		i++;
+	}
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+	/* Drop VFU and interrupt the PF to tell it a message has been sent */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			     u16 mbx_id)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_read_mbx_vf");
+	UNREFERENCED_1PARAMETER(mbx_id);
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+	/* Acknowledge receipt and release mailbox, then we're done */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications */
+	mbx->timeout = 0;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	mbx->ops.read = ixgbe_read_mbx_vf;
+	mbx->ops.write = ixgbe_write_mbx_vf;
+	mbx->ops.read_posted = ixgbe_read_posted_mbx;
+	mbx->ops.write_posted = ixgbe_write_posted_mbx;
+	mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+	mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+	mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+}
+
+STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbvficr & mask) {
+		ret_val = IXGBE_SUCCESS;
+		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+	u32 vf_bit = vf_number % 16;
+
+	DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+				    index)) {
+		ret_val = IXGBE_SUCCESS;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+	u32 vf_bit = vf_number % 16;
+
+	DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+				    index)) {
+		ret_val = IXGBE_SUCCESS;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if a reset of the VF has been detected or else ERR_MBX
+ **/
+STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	u32 reg_offset = (vf_number < 32) ? 0 : 1;
+	u32 vf_shift = vf_number % 32;
+	u32 vflre = 0;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+		break;
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_X540:
+		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+		break;
+	default:
+		break;
+	}
+
+	if (vflre & (1 << vf_shift)) {
+		ret_val = IXGBE_SUCCESS;
+		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	u32 p2v_mailbox;
+
+	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+	/* Take ownership of the buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+	/* reserve mailbox for vf use */
+	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+		ret_val = IXGBE_SUCCESS;
+	else
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			   "Failed to obtain mailbox lock for VF%d", vf_number);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			      u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_write_mbx_pf");
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbe_check_for_msg_pf(hw, vf_number);
+	ixgbe_check_for_ack_pf(hw, vf_number);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+	/*
+	 * Complete the remaining mailbox data registers with zero to reset
+	 * the data sent in a previous exchange (in either side) with the VF,
+	 * including exchanges performed by another Guest OS to which that VF
+	 * was previously assigned.
+	 */
+	while (i < hw->mbx.size) {
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
+		i++;
+	}
+
+	/* Interrupt VF to tell it a message has been sent and release buffer*/
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+			     u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("ixgbe_read_mbx_pf");
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+	/* Acknowledge the message and release buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+	    hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X540)
+		return;
+
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	mbx->ops.read = ixgbe_read_mbx_pf;
+	mbx->ops.write = ixgbe_write_mbx_pf;
+	mbx->ops.read_posted = ixgbe_read_posted_mbx;
+	mbx->ops.write_posted = ixgbe_write_posted_mbx;
+	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+}
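
To make the PF/VF handshake implemented above concrete, a hedged VF-side sketch of a posted request/reply exchange. The two-word message layout is illustrative; the IXGBE_VF_* opcodes and ACK/NACK bits come from ixgbe_mbx.h (added below), and the sketch assumes the VF reset path has already set mbx->timeout, which ixgbe_init_mbx_params_vf() leaves at zero.

/* Illustrative VF-side exchange: send a request, wait for the PF reply. */
static s32 example_vf_mbx_request(struct ixgbe_hw *hw, u32 opcode)
{
	u32 msg[2] = { opcode, 0 };
	s32 ret;

	/* mbx_id is unused on the VF side */
	ret = ixgbe_write_posted_mbx(hw, msg, 2, 0);
	if (ret)
		return ret;		/* no ack from the PF within the timeout */

	ret = ixgbe_read_posted_mbx(hw, msg, 2, 0);
	if (ret)
		return ret;		/* no reply from the PF within the timeout */

	/* the PF echoes the opcode with either the ACK or the NACK bit set */
	if (msg[0] == (opcode | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return IXGBE_SUCCESS;
}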
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
new file mode 100644
index 0000000..4594572
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -0,0 +1,150 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX		-100
+
+#define IXGBE_VFMAILBOX		0x002FC
+#define IXGBE_VFMBMEM		0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ	0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK	0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS	0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK	0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI	0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD	0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS	0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS	0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK	0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK	0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the message value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK	0x80000000 /* Messages below or'd with
+					    * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK	0x40000000 /* Messages below or'd with
+					    * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS	0x20000000 /* Indicates that VF is still
+					    * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT	16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
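+
+/*
+ * Illustrative use of the MSGINFO field (comment-only sketch, not introduced
+ * by this patch; msgbuf and index are placeholder names): a VF unicast filter
+ * request encodes its filter index in bits 23:16, e.g.
+ *
+ *	msgbuf[0] = IXGBE_VF_SET_MACVLAN | (index << IXGBE_VT_MSGINFO_SHIFT);
+ */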
+
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
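+
+/*
+ * Illustrative negotiation sketch (comment only; msg is a placeholder
+ * buffer): a VF driver typically sends
+ *
+ *	msg[0] = IXGBE_VF_API_NEGOTIATE;
+ *	msg[1] = ixgbe_mbox_api_11;
+ *
+ * and falls back to ixgbe_mbox_api_10 if the PF NACKs the request.
+ */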
+
+/* mailbox API, legacy requests */
+#define IXGBE_VF_RESET		0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN	4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD		3
+
+#define IXGBE_PF_CONTROL_MSG		0x0100 /* PF control message */
+
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_API_NEGOTIATE		0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES		0x09 /* get queue configuration */
+#define IXGBE_VF_ENABLE_MACADDR		0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR	0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS		0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC	0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU		0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU		0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN	0x0101 /* enable transparent vlan */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
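+
+/*
+ * Minimal VF-side usage sketch (illustrative comment only; msg is a
+ * placeholder buffer and mailbox id 0 is assumed):
+ *
+ *	u32 msg[IXGBE_VFMAILBOX_SIZE] = { IXGBE_VF_RESET };
+ *
+ *	if (ixgbe_write_posted_mbx(hw, msg, 1, 0) == IXGBE_SUCCESS)
+ *		(void)ixgbe_read_posted_mbx(hw, msg,
+ *					    IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ */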
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
new file mode 100644
index 0000000..4fb8dd7
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -0,0 +1,155 @@ 
+/******************************************************************************
+
+  Copyright (c) 2001-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_OS_H_
+#define _IXGBE_OS_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_byteorder.h>
+
+#include "../ixgbe_logs.h"
+#include "../ixgbe_bypass_defines.h"
+
+#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F)            DEBUGOUT(F "\n");
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG_RAW(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
+
+#define ERROR_REPORT1(e, S, args...)   DEBUGOUT(S, ##args)
+#define ERROR_REPORT2(e, S, args...)   DEBUGOUT(S, ##args)
+#define ERROR_REPORT3(e, S, args...)   DEBUGOUT(S, ##args)
+
+#define FALSE               0
+#define TRUE                1
+
+#define false               0
+#define true                1
+#define min(a,b)	RTE_MIN(a,b) 
+
+#define EWARN(hw, S, args...)     DEBUGOUT1(S, ##args)
+
+/* Bunch of defines for shared code bogosity */
+#define UNREFERENCED_PARAMETER(_p)  
+#define UNREFERENCED_1PARAMETER(_p) 
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) 
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) 
+
+/* Shared code error reporting */
+enum {
+	IXGBE_ERROR_SOFTWARE,
+	IXGBE_ERROR_POLLING,
+	IXGBE_ERROR_INVALID_STATE,
+	IXGBE_ERROR_UNSUPPORTED,
+	IXGBE_ERROR_ARGUMENT,
+	IXGBE_ERROR_CAUTION,
+};
+
+#define STATIC static
+#define IXGBE_NTOHL(_i)	rte_be_to_cpu_32(_i)
+#define IXGBE_NTOHS(_i)	rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE32(_i)  rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_CPU_TO_BE16(_i)  rte_cpu_to_be_16(_i)
+#define IXGBE_CPU_TO_BE32(_i)  rte_cpu_to_be_32(_i)
+
+typedef uint8_t		u8;
+typedef int8_t		s8;
+typedef uint16_t	u16;
+typedef int16_t		s16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+typedef uint64_t	u64;
+typedef int		bool;
+
+#define mb()	rte_mb()
+#define wmb()	rte_wmb()
+#define rmb()	rte_rmb()
+
+#define IOMEM
+
+#define prefetch(x) rte_prefetch0(x)
+
+#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+static inline uint32_t ixgbe_read_addr(volatile void* addr)
+{
+	return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
+}
+
+#define IXGBE_PCI_REG_WRITE(reg, value) do { \
+	IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
+} while(0)
+
+#define IXGBE_PCI_REG_ADDR(hw, reg) \
+	((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+	IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+/* Not implemented !! */
+#define IXGBE_READ_PCIE_WORD(hw, reg) 0	
+#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define IXGBE_READ_REG(hw, reg) \
+	ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
+
+#define IXGBE_WRITE_REG(hw, reg, value) \
+	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
+	IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
+	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
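+
+/*
+ * Illustrative use of the accessors above (comment-only sketch; IXGBE_CTRL
+ * and IXGBE_CTRL_RST come from ixgbe_type.h):
+ *
+ *	u32 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ *	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl | IXGBE_CTRL_RST);
+ *	IXGBE_WRITE_FLUSH(hw);
+ */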
+
+#endif /* _IXGBE_OS_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
new file mode 100644
index 0000000..4a3463a
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -0,0 +1,2583 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 *sff8472_data);
+
+/**
+ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
+ * @hw: pointer to the hardware structure
+ * @byte: byte to send
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+{
+	s32 status;
+
+	status = ixgbe_clock_out_i2c_byte(hw, byte);
+	if (status)
+		return status;
+	return ixgbe_get_i2c_ack(hw);
+}
+
+/**
+ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
+ * @hw: pointer to the hardware structure
+ * @byte: pointer to a u8 to receive the byte
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+{
+	s32 status;
+
+	status = ixgbe_clock_in_i2c_byte(hw, byte);
+	if (status)
+		return status;
+	/* ACK */
+	return ixgbe_clock_out_i2c_bit(hw, false);
+}
+
+/**
+ * ixgbe_ones_comp_byte_add - Perform one's complement addition
+ * @add1 - addend 1
+ * @add2 - addend 2
+ *
+ * Returns one's complement 8-bit sum.
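+ *
+ * Worked example (illustrative): 0xF0 + 0x20 = 0x110; folding the carry
+ * back in gives 0x10 + 0x01 = 0x11.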
+ */
+STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
+{
+	u16 sum = add1 + add2;
+
+	sum = (sum & 0xFF) + (sum >> 8);
+	return sum & 0xFF;
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+					   u16 reg, u16 *val)
+{
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	int max_retry = 10;
+	int retry = 0;
+	u8 csum_byte;
+	u8 high_bits;
+	u8 low_bits;
+	u8 reg_high;
+	u8 csum;
+
+	reg_high = ((reg >> 7) & 0xFE) | 1;	/* Indicate read combined */
+	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+	csum = ~csum;
+	do {
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+			return IXGBE_ERR_SWFW_SYNC;
+		ixgbe_i2c_start(hw);
+		/* Device Address and write indication */
+		if (ixgbe_out_i2c_byte_ack(hw, addr))
+			goto fail;
+		/* Write bits 14:8 */
+		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+			goto fail;
+		/* Write bits 7:0 */
+		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+			goto fail;
+		/* Write csum */
+		if (ixgbe_out_i2c_byte_ack(hw, csum))
+			goto fail;
+		/* Re-start condition */
+		ixgbe_i2c_start(hw);
+		/* Device Address and read indication */
+		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
+			goto fail;
+		/* Get upper bits */
+		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
+			goto fail;
+		/* Get low bits */
+		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
+			goto fail;
+		/* Get csum */
+		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
+			goto fail;
+		/* NACK */
+		if (ixgbe_clock_out_i2c_bit(hw, false))
+			goto fail;
+		ixgbe_i2c_stop(hw);
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		*val = (high_bits << 8) | low_bits;
+		return 0;
+
+fail:
+		ixgbe_i2c_bus_clear(hw);
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		retry++;
+		if (retry < max_retry)
+			DEBUGOUT("I2C byte read combined error - Retrying.\n");
+		else
+			DEBUGOUT("I2C byte read combined error.\n");
+	} while (retry < max_retry);
+
+	return IXGBE_ERR_I2C;
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+					    u8 addr, u16 reg, u16 val)
+{
+	int max_retry = 1;
+	int retry = 0;
+	u8 reg_high;
+	u8 csum;
+
+	reg_high = (reg >> 7) & 0xFE;	/* Indicate write combined */
+	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
+	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
+	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
+	csum = ~csum;
+	do {
+		ixgbe_i2c_start(hw);
+		/* Device Address and write indication */
+		if (ixgbe_out_i2c_byte_ack(hw, addr))
+			goto fail;
+		/* Write bits 14:8 */
+		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
+			goto fail;
+		/* Write bits 7:0 */
+		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
+			goto fail;
+		/* Write data 15:8 */
+		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
+			goto fail;
+		/* Write data 7:0 */
+		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
+			goto fail;
+		/* Write csum */
+		if (ixgbe_out_i2c_byte_ack(hw, csum))
+			goto fail;
+		ixgbe_i2c_stop(hw);
+		return 0;
+
+fail:
+		ixgbe_i2c_bus_clear(hw);
+		retry++;
+		if (retry < max_retry)
+			DEBUGOUT("I2C byte write combined error - Retrying.\n");
+		else
+			DEBUGOUT("I2C byte write combined error.\n");
+	} while (retry < max_retry);
+
+	return IXGBE_ERR_I2C;
+}
+
+/**
+ *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the function pointers.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	DEBUGFUNC("ixgbe_init_phy_ops_generic");
+
+	/* PHY */
+	phy->ops.identify = ixgbe_identify_phy_generic;
+	phy->ops.reset = ixgbe_reset_phy_generic;
+	phy->ops.read_reg = ixgbe_read_phy_reg_generic;
+	phy->ops.write_reg = ixgbe_write_phy_reg_generic;
+	phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
+	phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
+	phy->ops.setup_link = ixgbe_setup_phy_link_generic;
+	phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
+	phy->ops.check_link = NULL;
+	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+	phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
+	phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
+	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
+	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
+	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
+	phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
+	phy->ops.identify_sfp = ixgbe_identify_module_generic;
+	phy->sfp_type = ixgbe_sfp_type_unknown;
+	phy->ops.read_i2c_combined = ixgbe_read_i2c_combined_generic;
+	phy->ops.write_i2c_combined = ixgbe_write_i2c_combined_generic;
+	phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_identify_phy_generic - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	u32 phy_addr;
+	u16 ext_ability = 0;
+
+	DEBUGFUNC("ixgbe_identify_phy_generic");
+
+	if (!hw->phy.phy_semaphore_mask) {
+		if (hw->bus.lan_id)
+			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+		else
+			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+	}
+
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+				hw->phy.addr = phy_addr;
+				ixgbe_get_phy_id(hw);
+				hw->phy.type =
+					ixgbe_get_phy_type_from_id(hw->phy.id);
+
+				if (hw->phy.type == ixgbe_phy_unknown) {
+					hw->phy.ops.read_reg(hw,
+						  IXGBE_MDIO_PHY_EXT_ABILITY,
+						  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+						  &ext_ability);
+					if (ext_ability &
+					    (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+					     IXGBE_MDIO_PHY_1000BASET_ABILITY))
+						hw->phy.type =
+							 ixgbe_phy_cu_unknown;
+					else
+						hw->phy.type =
+							 ixgbe_phy_generic;
+				}
+
+				status = IXGBE_SUCCESS;
+				break;
+			}
+		}
+
+		/* Certain media types do not have a phy so an address will not
+		 * be found and the code will take this path.  Caller has to
+		 * decide if it is an error or not.
+		 */
+		if (status != IXGBE_SUCCESS) {
+			hw->phy.addr = 0;
+		}
+	} else {
+		status = IXGBE_SUCCESS;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+	u32 mmngc;
+
+	DEBUGFUNC("ixgbe_check_reset_blocked");
+
+	/* If we don't have this bit, it can't be blocking */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return false;
+
+	mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+	if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+			      "MNG_VETO bit detected.\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ *  ixgbe_validate_phy_addr - Determines whether a PHY address is valid
+ *  @hw: pointer to hardware structure
+ *  @phy_addr: PHY address to validate
+ *
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+	u16 phy_id = 0;
+	bool valid = false;
+
+	DEBUGFUNC("ixgbe_validate_phy_addr");
+
+	hw->phy.addr = phy_addr;
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+	if (phy_id != 0xFFFF && phy_id != 0x0)
+		valid = true;
+
+	return valid;
+}
+
+/**
+ *  ixgbe_get_phy_id - Get the PHY ID and revision
+ *  @hw: pointer to hardware structure
+ *
+ **/
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 phy_id_high = 0;
+	u16 phy_id_low = 0;
+
+	DEBUGFUNC("ixgbe_get_phy_id");
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				      &phy_id_high);
+
+	if (status == IXGBE_SUCCESS) {
+		hw->phy.id = (u32)(phy_id_high << 16);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+					      &phy_id_low);
+		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_get_phy_type_from_id - Get the phy type
+ *  @phy_id: PHY ID read from the PHY registers
+ *
+ **/
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+	enum ixgbe_phy_type phy_type;
+
+	DEBUGFUNC("ixgbe_get_phy_type_from_id");
+
+	switch (phy_id) {
+	case TN1010_PHY_ID:
+		phy_type = ixgbe_phy_tn;
+		break;
+	case X550_PHY_ID:
+	case X540_PHY_ID:
+		phy_type = ixgbe_phy_aq;
+		break;
+	case QT2022_PHY_ID:
+		phy_type = ixgbe_phy_qt;
+		break;
+	case ATH_PHY_ID:
+		phy_type = ixgbe_phy_nl;
+		break;
+	case X557_PHY_ID:
+		phy_type = ixgbe_phy_x550em_ext_t;
+		break;
+	default:
+		phy_type = ixgbe_phy_unknown;
+		break;
+	}
+
+	DEBUGOUT1("phy type found is %d\n", phy_type);
+	return phy_type;
+}
+
+/**
+ *  ixgbe_reset_phy_generic - Performs a PHY reset
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u16 ctrl = 0;
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_reset_phy_generic");
+
+	if (hw->phy.type == ixgbe_phy_unknown)
+		status = ixgbe_identify_phy_generic(hw);
+
+	if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
+		goto out;
+
+	/* Don't reset PHY if it's shut down due to overtemp. */
+	if (!hw->phy.reset_if_overtemp &&
+	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+		goto out;
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
+	/*
+	 * Perform soft PHY reset to the PHY_XS.
+	 * This will cause a soft reset to the PHY
+	 */
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
+			      IXGBE_MDIO_PHY_XS_RESET);
+
+	/*
+	 * Poll for reset bit to self-clear indicating reset is complete.
+	 * Some PHYs could take up to 3 seconds to complete and need about
+	 * 1.7 usec delay after the reset is complete.
+	 */
+	for (i = 0; i < 30; i++) {
+		msec_delay(100);
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+		if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+			usec_delay(2);
+			break;
+		}
+	}
+
+	if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
+		status = IXGBE_ERR_RESET_FAILED;
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "PHY reset polling failed to complete.\n");
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
+ *  the SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
+ *  @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+		       u16 *phy_data)
+{
+	u32 i, data, command;
+
+	/* Setup and write the address cycle command */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The MDI Command bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/*
+	 * Address cycle complete, setup and write the read
+	 * command
+	 */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle
+	 * completed. The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/*
+	 * Read operation is complete.  Get the data
+	 * from MSRWD
+	 */
+	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+	*phy_data = (u16)(data);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ *  using the SWFW lock - this function is needed in most cases
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
+ *  @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+			       u32 device_type, u16 *phy_data)
+{
+	s32 status;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	DEBUGFUNC("ixgbe_read_phy_reg_generic");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+		status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+						phy_data);
+		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ *  without SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	u32 i, command;
+
+	/* Put the data in the MDI single read and write data register*/
+	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+	/* Setup and write the address cycle command */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The MDI Command bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/*
+	 * Address cycle complete, setup and write the write
+	 * command
+	 */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle
+	 * completed. The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ *  using SWFW lock- this function is needed in most cases
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	s32 status;
+	u32 gssr = hw->phy.phy_semaphore_mask;
+
+	DEBUGFUNC("ixgbe_write_phy_reg_generic");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+						 phy_data);
+		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_phy_link_generic - Set and restart auto-neg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restarts auto-negotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+	bool autoneg = false;
+	ixgbe_link_speed speed;
+
+	DEBUGFUNC("ixgbe_setup_phy_link_generic");
+
+	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+		/* Set or unset auto-negotiation 10G advertisement */
+		hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+			autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+		hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	if (hw->mac.type == ixgbe_mac_X550) {
+		if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
+			/* Set or unset auto-negotiation 5G advertisement */
+			hw->phy.ops.read_reg(hw,
+				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				&autoneg_reg);
+
+			autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+			if (hw->phy.autoneg_advertised &
+			     IXGBE_LINK_SPEED_5GB_FULL)
+				autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+			hw->phy.ops.write_reg(hw,
+				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				autoneg_reg);
+		}
+
+		if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
+			/* Set or unset auto-negotiation 2.5G advertisement */
+			hw->phy.ops.read_reg(hw,
+				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				&autoneg_reg);
+
+			autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_2_5GB_FULL)
+				autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
+
+			hw->phy.ops.write_reg(hw,
+				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				autoneg_reg);
+		}
+	}
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+		/* Set or unset auto-negotiation 1G advertisement */
+		hw->phy.ops.read_reg(hw,
+				     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+		hw->phy.ops.write_reg(hw,
+				      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL) {
+		/* Set or unset auto-negotiation 100M advertisement */
+		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+				 IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+			autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	/* Blocked by MNG FW so don't reset PHY */
+	if (ixgbe_check_reset_blocked(hw))
+		return status;
+
+	/* Restart PHY auto-negotiation. */
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+	autoneg_reg |= IXGBE_MII_RESTART;
+
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: unused
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+				       ixgbe_link_speed speed,
+				       bool autoneg_wait_to_complete)
+{
+	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+	DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
+
+	/*
+	 * Clear autoneg_advertised and set new values based on input link
+	 * speed.
+	 */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+	/* Setup link based on the new speed settings */
+	hw->phy.ops.setup_link(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the supported link capabilities by reading the PHY auto
+ *  negotiation register.
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+					       ixgbe_link_speed *speed,
+					       bool *autoneg)
+{
+	s32 status;
+	u16 speed_ability;
+
+	DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
+
+	*speed = 0;
+	*autoneg = true;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				      &speed_ability);
+
+	if (status == IXGBE_SUCCESS) {
+		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+			*speed |= IXGBE_LINK_SPEED_100_FULL;
+	}
+
+	/* Internal PHY does not support 100 Mbps */
+	if (hw->mac.type == ixgbe_mac_X550EM_x)
+		*speed &= ~IXGBE_LINK_SPEED_100_FULL;
+
+	if (hw->mac.type == ixgbe_mac_X550) {
+		*speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
+		*speed |= IXGBE_LINK_SPEED_5GB_FULL;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_check_phy_link_tnx - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the VS1 register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			     bool *link_up)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 time_out;
+	u32 max_time_out = 10;
+	u16 phy_link = 0;
+	u16 phy_speed = 0;
+	u16 phy_data = 0;
+
+	DEBUGFUNC("ixgbe_check_phy_link_tnx");
+
+	/* Initialize speed and link to default case */
+	*link_up = false;
+	*speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+	/*
+	 * Check current speed and link status of the PHY register.
+	 * This is a vendor specific register and may have to
+	 * be changed for other copper PHYs.
+	 */
+	for (time_out = 0; time_out < max_time_out; time_out++) {
+		usec_delay(10);
+		status = hw->phy.ops.read_reg(hw,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					&phy_data);
+		phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+		phy_speed = phy_data &
+				 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+			*link_up = true;
+			if (phy_speed ==
+			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+				*speed = IXGBE_LINK_SPEED_1GB_FULL;
+			break;
+		}
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_phy_link_tnx - Set and restart auto-neg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restarts auto-negotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+	bool autoneg = false;
+	ixgbe_link_speed speed;
+
+	DEBUGFUNC("ixgbe_setup_phy_link_tnx");
+
+	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+		/* Set or unset auto-negotiation 10G advertisement */
+		hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+			autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+		hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+		/* Set or unset auto-negotiation 1G advertisement */
+		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL) {
+		/* Set or unset auto-negotiation 100M advertisement */
+		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				     &autoneg_reg);
+
+		autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+			autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      autoneg_reg);
+	}
+
+	/* Blocked by MNG FW so don't reset PHY */
+	if (ixgbe_check_reset_blocked(hw))
+		return status;
+
+	/* Restart PHY auto-negotiation. */
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+	autoneg_reg |= IXGBE_MII_RESTART;
+
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+				       u16 *firmware_version)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
+
+	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      firmware_version);
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+					   u16 *firmware_version)
+{
+	s32 status;
+
+	DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
+
+	status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      firmware_version);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_phy_nl - Performs a PHY reset
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+	u16 phy_offset, control, eword, edata, block_crc;
+	bool end_data = false;
+	u16 list_offset, data_offset;
+	u16 phy_data = 0;
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_reset_phy_nl");
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+			     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+	/* reset the PHY and poll for completion */
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
+			      (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+	for (i = 0; i < 100; i++) {
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+		if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+			break;
+		msec_delay(10);
+	}
+
+	if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+		DEBUGOUT("PHY reset did not complete.\n");
+		ret_val = IXGBE_ERR_PHY;
+		goto out;
+	}
+
+	/* Get init offsets */
+	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+						      &data_offset);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+	data_offset++;
+	while (!end_data) {
+		/*
+		 * Read control word from PHY init contents offset
+		 */
+		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+		if (ret_val)
+			goto err_eeprom;
+		control = (eword & IXGBE_CONTROL_MASK_NL) >>
+			   IXGBE_CONTROL_SHIFT_NL;
+		edata = eword & IXGBE_DATA_MASK_NL;
+		switch (control) {
+		case IXGBE_DELAY_NL:
+			data_offset++;
+			DEBUGOUT1("DELAY: %d MS\n", edata);
+			msec_delay(edata);
+			break;
+		case IXGBE_DATA_NL:
+			DEBUGOUT("DATA:\n");
+			data_offset++;
+			ret_val = hw->eeprom.ops.read(hw, data_offset,
+						      &phy_offset);
+			if (ret_val)
+				goto err_eeprom;
+			data_offset++;
+			for (i = 0; i < edata; i++) {
+				ret_val = hw->eeprom.ops.read(hw, data_offset,
+							      &eword);
+				if (ret_val)
+					goto err_eeprom;
+				hw->phy.ops.write_reg(hw, phy_offset,
+						      IXGBE_TWINAX_DEV, eword);
+				DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
+					  phy_offset);
+				data_offset++;
+				phy_offset++;
+			}
+			break;
+		case IXGBE_CONTROL_NL:
+			data_offset++;
+			DEBUGOUT("CONTROL:\n");
+			if (edata == IXGBE_CONTROL_EOL_NL) {
+				DEBUGOUT("EOL\n");
+				end_data = true;
+			} else if (edata == IXGBE_CONTROL_SOL_NL) {
+				DEBUGOUT("SOL\n");
+			} else {
+				DEBUGOUT("Bad control value\n");
+				ret_val = IXGBE_ERR_PHY;
+				goto out;
+			}
+			break;
+		default:
+			DEBUGOUT("Bad control type\n");
+			ret_val = IXGBE_ERR_PHY;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+
+err_eeprom:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", data_offset);
+	return IXGBE_ERR_PHY;
+}
+
+/**
+ *  ixgbe_identify_module_generic - Identifies module type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+	DEBUGFUNC("ixgbe_identify_module_generic");
+
+	switch (hw->mac.ops.get_media_type(hw)) {
+	case ixgbe_media_type_fiber:
+		status = ixgbe_identify_sfp_module_generic(hw);
+		break;
+
+	case ixgbe_media_type_fiber_qsfp:
+		status = ixgbe_identify_qsfp_module_generic(hw);
+		break;
+
+	default:
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ *  @hw: pointer to hardware structure
+ *
+ *  Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	u32 vendor_oui = 0;
+	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+	u8 identifier = 0;
+	u8 comp_codes_1g = 0;
+	u8 comp_codes_10g = 0;
+	u8 oui_bytes[3] = {0, 0, 0};
+	u8 cable_tech = 0;
+	u8 cable_spec = 0;
+	u16 enforce_sfp = 0;
+
+	DEBUGFUNC("ixgbe_identify_sfp_module_generic");
+
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
+	/* LAN ID is needed for I2C access */
+	hw->mac.ops.set_lan_id(hw);
+
+	status = hw->phy.ops.read_i2c_eeprom(hw,
+					     IXGBE_SFF_IDENTIFIER,
+					     &identifier);
+
+	if (status != IXGBE_SUCCESS)
+		goto err_read_i2c_eeprom;
+
+	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+	} else {
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     IXGBE_SFF_1GBE_COMP_CODES,
+						     &comp_codes_1g);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     IXGBE_SFF_10GBE_COMP_CODES,
+						     &comp_codes_10g);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+						     IXGBE_SFF_CABLE_TECHNOLOGY,
+						     &cable_tech);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+
+		 /* ID Module
+		  * =========
+		  * 0   SFP_DA_CU
+		  * 1   SFP_SR
+		  * 2   SFP_LR
+		  * 3   SFP_DA_CORE0 - 82599-specific
+		  * 4   SFP_DA_CORE1 - 82599-specific
+		  * 5   SFP_SR/LR_CORE0 - 82599-specific
+		  * 6   SFP_SR/LR_CORE1 - 82599-specific
+		  * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+		  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+		  * 9   SFP_1g_cu_CORE0 - 82599-specific
+		  * 10  SFP_1g_cu_CORE1 - 82599-specific
+		  * 11  SFP_1g_sx_CORE0 - 82599-specific
+		  * 12  SFP_1g_sx_CORE1 - 82599-specific
+		  */
+		if (hw->mac.type == ixgbe_mac_82598EB) {
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+				hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+			else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+				hw->phy.sfp_type = ixgbe_sfp_type_sr;
+			else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+				hw->phy.sfp_type = ixgbe_sfp_type_lr;
+			else
+				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+		} else {
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						     ixgbe_sfp_type_da_cu_core0;
+				else
+					hw->phy.sfp_type =
+						     ixgbe_sfp_type_da_cu_core1;
+			} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+				hw->phy.ops.read_i2c_eeprom(
+						hw, IXGBE_SFF_CABLE_SPEC_COMP,
+						&cable_spec);
+				if (cable_spec &
+				    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+					if (hw->bus.lan_id == 0)
+						hw->phy.sfp_type =
+						ixgbe_sfp_type_da_act_lmt_core0;
+					else
+						hw->phy.sfp_type =
+						ixgbe_sfp_type_da_act_lmt_core1;
+				} else {
+					hw->phy.sfp_type =
+							ixgbe_sfp_type_unknown;
+				}
+			} else if (comp_codes_10g &
+				   (IXGBE_SFF_10GBASESR_CAPABLE |
+				    IXGBE_SFF_10GBASELR_CAPABLE)) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						      ixgbe_sfp_type_srlr_core0;
+				else
+					hw->phy.sfp_type =
+						      ixgbe_sfp_type_srlr_core1;
+			} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_cu_core0;
+				else
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_cu_core1;
+			} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_sx_core0;
+				else
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_sx_core1;
+			} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+				if (hw->bus.lan_id == 0)
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_lx_core0;
+				else
+					hw->phy.sfp_type =
+						ixgbe_sfp_type_1g_lx_core1;
+			} else {
+				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+			}
+		}
+
+		if (hw->phy.sfp_type != stored_sfp_type)
+			hw->phy.sfp_setup_needed = true;
+
+		/* Determine if the SFP+ PHY is dual speed or not. */
+		hw->phy.multispeed_fiber = false;
+		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+		   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+		   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+		   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+			hw->phy.multispeed_fiber = true;
+
+		/* Determine PHY vendor */
+		if (hw->phy.type != ixgbe_phy_nl) {
+			hw->phy.id = identifier;
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+						    IXGBE_SFF_VENDOR_OUI_BYTE0,
+						    &oui_bytes[0]);
+
+			if (status != IXGBE_SUCCESS)
+				goto err_read_i2c_eeprom;
+
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+						    IXGBE_SFF_VENDOR_OUI_BYTE1,
+						    &oui_bytes[1]);
+
+			if (status != IXGBE_SUCCESS)
+				goto err_read_i2c_eeprom;
+
+			status = hw->phy.ops.read_i2c_eeprom(hw,
+						    IXGBE_SFF_VENDOR_OUI_BYTE2,
+						    &oui_bytes[2]);
+
+			if (status != IXGBE_SUCCESS)
+				goto err_read_i2c_eeprom;
+
+			vendor_oui =
+			  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+			   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+			   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+			switch (vendor_oui) {
+			case IXGBE_SFF_VENDOR_OUI_TYCO:
+				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+					hw->phy.type =
+						    ixgbe_phy_sfp_passive_tyco;
+				break;
+			case IXGBE_SFF_VENDOR_OUI_FTL:
+				if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+					hw->phy.type = ixgbe_phy_sfp_ftl_active;
+				else
+					hw->phy.type = ixgbe_phy_sfp_ftl;
+				break;
+			case IXGBE_SFF_VENDOR_OUI_AVAGO:
+				hw->phy.type = ixgbe_phy_sfp_avago;
+				break;
+			case IXGBE_SFF_VENDOR_OUI_INTEL:
+				hw->phy.type = ixgbe_phy_sfp_intel;
+				break;
+			default:
+				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+					hw->phy.type =
+						 ixgbe_phy_sfp_passive_unknown;
+				else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+					hw->phy.type =
+						ixgbe_phy_sfp_active_unknown;
+				else
+					hw->phy.type = ixgbe_phy_sfp_unknown;
+				break;
+			}
+		}
+
+		/* Allow any DA cable vendor */
+		if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+		    IXGBE_SFF_DA_ACTIVE_CABLE)) {
+			status = IXGBE_SUCCESS;
+			goto out;
+		}
+
+		/* Verify supported 1G SFP modules */
+		if (comp_codes_10g == 0 &&
+		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+			hw->phy.type = ixgbe_phy_sfp_unsupported;
+			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+
+		/* Anything else 82598-based is supported */
+		if (hw->mac.type == ixgbe_mac_82598EB) {
+			status = IXGBE_SUCCESS;
+			goto out;
+		}
+
+		ixgbe_get_device_caps(hw, &enforce_sfp);
+		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+			/* Make sure we're a supported PHY type */
+			if (hw->phy.type == ixgbe_phy_sfp_intel) {
+				status = IXGBE_SUCCESS;
+			} else {
+				if (hw->allow_unsupported_sfp == true) {
+					EWARN(hw, "WARNING: Intel (R) Network "
+					      "Connections are quality tested "
+					      "using Intel (R) Ethernet Optics."
+					      " Using untested modules is not "
+					      "supported and may cause unstable"
+					      " operation or damage to the "
+					      "module or the adapter. Intel "
+					      "Corporation is not responsible "
+					      "for any harm caused by using "
+					      "untested modules.\n", status);
+					status = IXGBE_SUCCESS;
+				} else {
+					DEBUGOUT("SFP+ module not supported\n");
+					hw->phy.type =
+						ixgbe_phy_sfp_unsupported;
+					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+				}
+			}
+		} else {
+			status = IXGBE_SUCCESS;
+		}
+	}
+
+out:
+	return status;
+
+err_read_i2c_eeprom:
+	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+	if (hw->phy.type != ixgbe_phy_nl) {
+		hw->phy.id = 0;
+		hw->phy.type = ixgbe_phy_unknown;
+	}
+	return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ *  ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current SFP.
+ */
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u8 comp_codes_10g = 0;
+	u8 comp_codes_1g = 0;
+
+	DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");
+
+	hw->phy.ops.identify_sfp(hw);
+	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+		return physical_layer;
+
+	switch (hw->phy.type) {
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
+	case ixgbe_phy_qsfp_passive_unknown:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+		break;
+	case ixgbe_phy_sfp_ftl_active:
+	case ixgbe_phy_sfp_active_unknown:
+	case ixgbe_phy_qsfp_active_unknown:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+		break;
+	case ixgbe_phy_sfp_avago:
+	case ixgbe_phy_sfp_ftl:
+	case ixgbe_phy_sfp_intel:
+	case ixgbe_phy_sfp_unknown:
+		hw->phy.ops.read_i2c_eeprom(hw,
+		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+		hw->phy.ops.read_i2c_eeprom(hw,
+		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+		break;
+	case ixgbe_phy_qsfp_intel:
+	case ixgbe_phy_qsfp_unknown:
+		hw->phy.ops.read_i2c_eeprom(hw,
+		      IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
+	default:
+		break;
+	}
+
+	return physical_layer;
+}
+
+/**
+ *  ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ *  @hw: pointer to hardware structure
+ *
+ *  Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	u32 vendor_oui = 0;
+	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+	u8 identifier = 0;
+	u8 comp_codes_1g = 0;
+	u8 comp_codes_10g = 0;
+	u8 oui_bytes[3] = {0, 0, 0};
+	u16 enforce_sfp = 0;
+	u8 connector = 0;
+	u8 cable_length = 0;
+	u8 device_tech = 0;
+	bool active_cable = false;
+
+	DEBUGFUNC("ixgbe_identify_qsfp_module_generic");
+
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
+	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+					     &identifier);
+
+	if (status != IXGBE_SUCCESS)
+		goto err_read_i2c_eeprom;
+
+	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+		goto out;
+	}
+
+	hw->phy.id = identifier;
+
+	/* LAN ID is needed for sfp_type determination */
+	hw->mac.ops.set_lan_id(hw);
+
+	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+					     &comp_codes_10g);
+
+	if (status != IXGBE_SUCCESS)
+		goto err_read_i2c_eeprom;
+
+	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+					     &comp_codes_1g);
+
+	if (status != IXGBE_SUCCESS)
+		goto err_read_i2c_eeprom;
+
+	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+		if (hw->bus.lan_id == 0)
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+				     IXGBE_SFF_10GBASELR_CAPABLE)) {
+		if (hw->bus.lan_id == 0)
+			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+	} else {
+		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+			active_cable = true;
+
+		if (!active_cable) {
+			/* check for active DA cables that pre-date
+			 * SFF-8436 v3.6 */
+			hw->phy.ops.read_i2c_eeprom(hw,
+					IXGBE_SFF_QSFP_CONNECTOR,
+					&connector);
+
+			hw->phy.ops.read_i2c_eeprom(hw,
+					IXGBE_SFF_QSFP_CABLE_LENGTH,
+					&cable_length);
+
+			hw->phy.ops.read_i2c_eeprom(hw,
+					IXGBE_SFF_QSFP_DEVICE_TECH,
+					&device_tech);
+
+			if ((connector ==
+				     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+			    (cable_length > 0) &&
+			    ((device_tech >> 4) ==
+				     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+				active_cable = true;
+		}
+
+		if (active_cable) {
+			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+			if (hw->bus.lan_id == 0)
+				hw->phy.sfp_type =
+						ixgbe_sfp_type_da_act_lmt_core0;
+			else
+				hw->phy.sfp_type =
+						ixgbe_sfp_type_da_act_lmt_core1;
+		} else {
+			/* unsupported module type */
+			hw->phy.type = ixgbe_phy_sfp_unsupported;
+			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+	}
+
+	if (hw->phy.sfp_type != stored_sfp_type)
+		hw->phy.sfp_setup_needed = true;
+
+	/* Determine if the QSFP+ PHY is dual speed or not. */
+	hw->phy.multispeed_fiber = false;
+	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+	   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+	   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+	   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+		hw->phy.multispeed_fiber = true;
+
+	/* Determine PHY vendor for optical modules */
+	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+			      IXGBE_SFF_10GBASELR_CAPABLE))  {
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+					    &oui_bytes[0]);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+					    &oui_bytes[1]);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+
+		status = hw->phy.ops.read_i2c_eeprom(hw,
+					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+					    &oui_bytes[2]);
+
+		if (status != IXGBE_SUCCESS)
+			goto err_read_i2c_eeprom;
+
+		vendor_oui =
+		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+			hw->phy.type = ixgbe_phy_qsfp_intel;
+		else
+			hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+		ixgbe_get_device_caps(hw, &enforce_sfp);
+		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+			/* Make sure we're a supported PHY type */
+			if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+				status = IXGBE_SUCCESS;
+			} else {
+				if (hw->allow_unsupported_sfp == true) {
+					EWARN(hw, "WARNING: Intel (R) Network "
+					      "Connections are quality tested "
+					      "using Intel (R) Ethernet Optics."
+					      " Using untested modules is not "
+					      "supported and may cause unstable"
+					      " operation or damage to the "
+					      "module or the adapter. Intel "
+					      "Corporation is not responsible "
+					      "for any harm caused by using "
+					      "untested modules.\n", status);
+					status = IXGBE_SUCCESS;
+				} else {
+					DEBUGOUT("QSFP module not supported\n");
+					hw->phy.type =
+						ixgbe_phy_sfp_unsupported;
+					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+				}
+			}
+		} else {
+			status = IXGBE_SUCCESS;
+		}
+	}
+
+out:
+	return status;
+
+err_read_i2c_eeprom:
+	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+	hw->phy.id = 0;
+	hw->phy.type = ixgbe_phy_unknown;
+
+	return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ *  @hw: pointer to hardware structure
+ *  @list_offset: offset to the SFP ID list
+ *  @data_offset: offset to the SFP data block
+ *
+ *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type;
+ *  if so, it returns the offsets to the PHY init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+					u16 *list_offset,
+					u16 *data_offset)
+{
+	u16 sfp_id;
+	u16 sfp_type = hw->phy.sfp_type;
+
+	DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
+
+	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+		return IXGBE_ERR_SFP_NOT_PRESENT;
+
+	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+	/*
+	 * Active limiting DA cables and 1G PHYs must be initialized as
+	 * SR modules
+	 */
+	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
+		sfp_type = ixgbe_sfp_type_srlr_core0;
+	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
+		sfp_type = ixgbe_sfp_type_srlr_core1;
+
+	/* Read offset to PHY init contents */
+	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      IXGBE_PHY_INIT_OFFSET_NL);
+		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+	}
+
+	if ((!*list_offset) || (*list_offset == 0xFFFF))
+		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+	/* Shift offset to first ID word */
+	(*list_offset)++;
+
+	/*
+	 * Find the matching SFP ID in the EEPROM
+	 * and program the init sequence
+	 */
+	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+		goto err_phy;
+
+	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+		if (sfp_id == sfp_type) {
+			(*list_offset)++;
+			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+				goto err_phy;
+			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+				DEBUGOUT("SFP+ module not supported\n");
+				return IXGBE_ERR_SFP_NOT_SUPPORTED;
+			} else {
+				break;
+			}
+		} else {
+			(*list_offset) += 2;
+			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+				goto err_phy;
+		}
+	}
+
+	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+		DEBUGOUT("No matching SFP+ module found\n");
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+
+	return IXGBE_SUCCESS;
+
+err_phy:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", *list_offset);
+	return IXGBE_ERR_PHY;
+}
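+
+/*
+ * As implied by the walk in ixgbe_get_sfp_init_sequence_offsets() above, the
+ * NL EEPROM area appears to be laid out as 16-bit words:
+ *
+ *   word at IXGBE_PHY_INIT_OFFSET_NL : offset of the SFP ID list
+ *   list_offset + 0                  : header word (skipped)
+ *   list_offset + 1, +3, +5, ...     : sfp_id words
+ *   list_offset + 2, +4, +6, ...     : matching data_offset words
+ *   list terminated by an sfp_id of IXGBE_PHY_INIT_END_NL
+ */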
+
+/**
+ *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				  u8 *eeprom_data)
+{
+	DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
+
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 IXGBE_I2C_EEPROM_DEV_ADDR,
+					 eeprom_data);
+}
+
+/**
+ *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @sff8472_data: value read
+ *
+ *  Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 *sff8472_data)
+{
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 IXGBE_I2C_EEPROM_DEV_ADDR2,
+					 sff8472_data);
+}
+
+/**
+ *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to write
+ *  @eeprom_data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				   u8 eeprom_data)
+{
+	DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
+
+	return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+					  IXGBE_I2C_EEPROM_DEV_ADDR,
+					  eeprom_data);
+}
+
+/**
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+	if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+	    offset == IXGBE_SFF_IDENTIFIER &&
+	    hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+		return true;
+	return false;
+}
+
+/**
+ *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address to read from
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 dev_addr, u8 *data)
+{
+	s32 status;
+	u32 max_retry = 10;
+	u32 retry = 0;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	bool nack = 1;
+	*data = 0;
+
+	DEBUGFUNC("ixgbe_read_i2c_byte_generic");
+
+	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+		max_retry = IXGBE_SFP_DETECT_RETRIES;
+
+	do {
+		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+			return IXGBE_ERR_SWFW_SYNC;
+
+		ixgbe_i2c_start(hw);
+
+		/* Device Address and write indication */
+		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		ixgbe_i2c_start(hw);
+
+		/* Device Address and read indication */
+		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_clock_in_i2c_byte(hw, data);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_clock_out_i2c_bit(hw, nack);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		ixgbe_i2c_stop(hw);
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		return IXGBE_SUCCESS;
+
+fail:
+		ixgbe_i2c_bus_clear(hw);
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		msec_delay(100);
+		retry++;
+		if (retry < max_retry)
+			DEBUGOUT("I2C byte read error - Retrying.\n");
+		else
+			DEBUGOUT("I2C byte read error.\n");
+
+	} while (retry < max_retry);
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address to write to
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 data)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 max_retry = 1;
+	u32 retry = 0;
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+
+	DEBUGFUNC("ixgbe_write_i2c_byte_generic");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+		status = IXGBE_ERR_SWFW_SYNC;
+		goto write_byte_out;
+	}
+
+	do {
+		ixgbe_i2c_start(hw);
+
+		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_clock_out_i2c_byte(hw, data);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		status = ixgbe_get_i2c_ack(hw);
+		if (status != IXGBE_SUCCESS)
+			goto fail;
+
+		ixgbe_i2c_stop(hw);
+		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+		return IXGBE_SUCCESS;
+
+fail:
+		ixgbe_i2c_bus_clear(hw);
+		retry++;
+		if (retry < max_retry)
+			DEBUGOUT("I2C byte write error - Retrying.\n");
+		else
+			DEBUGOUT("I2C byte write error.\n");
+	} while (retry < max_retry);
+
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+	return status;
+}
+
+/**
+ *  ixgbe_i2c_start - Sets I2C start condition
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets I2C start condition (High -> Low on SDA while SCL is High)
+ *  Set bit-bang mode on X550 hardware.
+ **/
+STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+	DEBUGFUNC("ixgbe_i2c_start");
+
+	i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
+
+	/* Start condition must begin with data and clock high */
+	ixgbe_set_i2c_data(hw, &i2cctl, 1);
+	ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+	/* Setup time for start condition (4.7us) */
+	usec_delay(IXGBE_I2C_T_SU_STA);
+
+	ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+	/* Hold time for start condition (4us) */
+	usec_delay(IXGBE_I2C_T_HD_STA);
+
+	ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+	/* Minimum low period of clock is 4.7 us */
+	usec_delay(IXGBE_I2C_T_LOW);
+
+}
+
+/**
+ *  ixgbe_i2c_stop - Sets I2C stop condition
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ *  Disables bit-bang mode and negates data output enable on X550
+ *  hardware.
+ **/
+STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+	u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
+
+	DEBUGFUNC("ixgbe_i2c_stop");
+
+	/* Stop condition must begin with data low and clock high */
+	ixgbe_set_i2c_data(hw, &i2cctl, 0);
+	ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+	/* Setup time for stop condition (4us) */
+	usec_delay(IXGBE_I2C_T_SU_STO);
+
+	ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+	/* bus free time between stop and start (4.7us)*/
+	usec_delay(IXGBE_I2C_T_BUF);
+
+	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+		i2cctl &= ~bb_en_bit;
+		i2cctl |= data_oe_bit | clk_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ *  @hw: pointer to hardware structure
+ *  @data: buffer to receive the byte clocked in
+ *
+ *  Clocks in one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+	s32 i;
+	bool bit = 0;
+
+	DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+
+	*data = 0;
+	for (i = 7; i >= 0; i--) {
+		ixgbe_clock_in_i2c_bit(hw, &bit);
+		*data |= bit << i;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ *  @hw: pointer to hardware structure
+ *  @data: data byte clocked out
+ *
+ *  Clocks out one byte data via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+	s32 status = IXGBE_SUCCESS;
+	s32 i;
+	u32 i2cctl;
+	bool bit;
+
+	DEBUGFUNC("ixgbe_clock_out_i2c_byte");
+
+	for (i = 7; i >= 0; i--) {
+		bit = (data >> i) & 0x1;
+		status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+		if (status != IXGBE_SUCCESS)
+			break;
+	}
+
+	/* Release SDA line (set high) */
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+	i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_i2c_ack - Polls for I2C ACK
+ *  @hw: pointer to hardware structure
+ *
+ *  Clocks in one bit via I2C data/clock and polls for the device ACK
+ **/
+STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+	s32 status = IXGBE_SUCCESS;
+	u32 i = 0;
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 timeout = 10;
+	bool ack = 1;
+
+	DEBUGFUNC("ixgbe_get_i2c_ack");
+
+	if (data_oe_bit) {
+		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+		i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+	ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+	/* Minimum high period of clock is 4us */
+	usec_delay(IXGBE_I2C_T_HIGH);
+
+	/* Poll for ACK.  Note that ACK in I2C spec is
+	 * transition from 1 to 0 */
+	for (i = 0; i < timeout; i++) {
+		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+		ack = ixgbe_get_i2c_data(hw, &i2cctl);
+
+		usec_delay(1);
+		if (!ack)
+			break;
+	}
+
+	if (ack) {
+		DEBUGOUT("I2C ack was not received.\n");
+		status = IXGBE_ERR_I2C;
+	}
+
+	ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+	/* Minimum low period of clock is 4.7 us */
+	usec_delay(IXGBE_I2C_T_LOW);
+
+	return status;
+}
+
+/**
+ *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ *  @hw: pointer to hardware structure
+ *  @data: read data value
+ *
+ *  Clocks in one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+
+	DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+
+	if (data_oe_bit) {
+		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+		i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+	ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+	/* Minimum high period of clock is 4us */
+	usec_delay(IXGBE_I2C_T_HIGH);
+
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	*data = ixgbe_get_i2c_data(hw, &i2cctl);
+
+	ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+	/* Minimum low period of clock is 4.7 us */
+	usec_delay(IXGBE_I2C_T_LOW);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ *  @hw: pointer to hardware structure
+ *  @data: data value to write
+ *
+ *  Clocks out one bit via I2C data/clock
+ **/
+STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+	s32 status;
+	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+	DEBUGFUNC("ixgbe_clock_out_i2c_bit");
+
+	status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+	if (status == IXGBE_SUCCESS) {
+		ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+		/* Minimum high period of clock is 4us */
+		usec_delay(IXGBE_I2C_T_HIGH);
+
+		ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+		/* Minimum low period of clock is 4.7 us.
+		 * This also takes care of the data hold time.
+		 */
+		usec_delay(IXGBE_I2C_T_LOW);
+	} else {
+		status = IXGBE_ERR_I2C;
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			     "I2C data was not set to %X\n", data);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Raises the I2C clock line '0'->'1'
+ *  Negates the I2C clock output enable on X550 hardware.
+ **/
+STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+	u32 i = 0;
+	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+	u32 i2cctl_r = 0;
+
+	DEBUGFUNC("ixgbe_raise_i2c_clk");
+
+	if (clk_oe_bit) {
+		*i2cctl |= clk_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+	}
+
+	for (i = 0; i < timeout; i++) {
+		*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+		/* SCL rise time (1000ns) */
+		usec_delay(IXGBE_I2C_T_RISE);
+
+		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+		if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+			break;
+	}
+}
+
+/**
+ *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Lowers the I2C clock line '1'->'0'
+ *  Asserts the I2C clock output enable on X550 hardware.
+ **/
+STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+	DEBUGFUNC("ixgbe_lower_i2c_clk");
+
+	*i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* SCL fall time (300ns) */
+	usec_delay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ *  ixgbe_set_i2c_data - Sets the I2C data bit
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *  @data: I2C data value (0 or 1) to set
+ *
+ *  Sets the I2C data bit
+ *  Asserts the I2C data output enable on X550 hardware.
+ **/
+STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_set_i2c_data");
+
+	if (data)
+		*i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+	else
+		*i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+	*i2cctl &= ~data_oe_bit;
+
+	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+	usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+	if (!data)	/* Can't verify data in this case */
+		return IXGBE_SUCCESS;
+	if (data_oe_bit) {
+		*i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	/* Verify data was set correctly */
+	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
+		status = IXGBE_ERR_I2C;
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			     "Error - I2C data was not set to %X.\n",
+			     data);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Returns the I2C data bit value
+ *  Negates the I2C data output enable on X550 hardware.
+ **/
+STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+	bool data;
+
+	DEBUGFUNC("ixgbe_get_i2c_data");
+
+	if (data_oe_bit) {
+		*i2cctl |= data_oe_bit;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+		usec_delay(IXGBE_I2C_T_FALL);
+	}
+
+	if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+		data = 1;
+	else
+		data = 0;
+
+	return data;
+}
+
+/**
+ *  ixgbe_i2c_bus_clear - Clears the I2C bus
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the I2C bus by sending nine clock pulses.
+ *  Used when data line is stuck low.
+ **/
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+	u32 i2cctl;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_i2c_bus_clear");
+
+	ixgbe_i2c_start(hw);
+	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+
+	ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+	for (i = 0; i < 9; i++) {
+		ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+		/* Min high period of clock is 4us */
+		usec_delay(IXGBE_I2C_T_HIGH);
+
+		ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+		/* Min low period of clock is 4.7us*/
+		usec_delay(IXGBE_I2C_T_LOW);
+	}
+
+	ixgbe_i2c_start(hw);
+
+	/* Put the i2c bus back to default state */
+	ixgbe_i2c_stop(hw);
+}
+
+/**
+ *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ *  @hw: pointer to hardware structure
+ *
+ *  Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	u16 phy_data = 0;
+
+	DEBUGFUNC("ixgbe_tn_check_overtemp");
+
+	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+		goto out;
+
+	/* Check that the LASI temp alarm status was triggered */
+	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+		goto out;
+
+	status = IXGBE_ERR_OVERTEMP;
+	ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
+out:
+	return status;
+}
+
+/**
+ * ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+	u32 status;
+	u16 reg;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+	if (status)
+		return status;
+
+	if (on) {
+		reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+	} else {
+		if (ixgbe_check_reset_blocked(hw))
+			return 0;
+		reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+	}
+
+	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+	return status;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
new file mode 100644
index 0000000..21d88f7
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -0,0 +1,181 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR	0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
+#define IXGBE_I2C_EEPROM_BANK_LEN	0xFF
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER		0x0
+#define IXGBE_SFF_IDENTIFIER_SFP	0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0	0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1	0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2	0x27
+#define IXGBE_SFF_1GBE_COMP_CODES	0x6
+#define IXGBE_SFF_10GBE_COMP_CODES	0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY	0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP	0x3C
+#define IXGBE_SFF_SFF_8472_SWAP		0x5C
+#define IXGBE_SFF_SFF_8472_COMP		0x5E
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS	0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0	0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1	0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2	0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR	0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP	0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP	0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH	0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH	0x93
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE	0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE	0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING	0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE	0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE	0x2
+#define IXGBE_SFF_1GBASET_CAPABLE	0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE	0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE	0x20
+#define IXGBE_SFF_ADDRESSING_MODE	0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE	0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE	0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE	0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL	0x0
+#define IXGBE_I2C_EEPROM_READ_MASK	0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK	0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS	0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL	0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
+
+#define IXGBE_CS4227			0xBE	/* CS4227 address */
+#define IXGBE_CS4227_SPARE24_LSB	0x12B0	/* Reg to program EDC */
+#define IXGBE_CS4227_EDC_MODE_CX1	0x0002
+#define IXGBE_CS4227_EDC_MODE_SR	0x0004
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE		0x400
+#define IXGBE_TAF_ASM_PAUSE		0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT	24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT	16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT	8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO	0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL	0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO	0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL	0x001B2100
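+
+/*
+ * For example, module OUI bytes 0x00, 0x1B, 0x21 assemble, via the shifts
+ * above, into 0x001B2100 (IXGBE_SFF_VENDOR_OUI_INTEL).
+ */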
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA	4
+#define IXGBE_I2C_T_LOW		5
+#define IXGBE_I2C_T_HIGH	4
+#define IXGBE_I2C_T_SU_STA	5
+#define IXGBE_I2C_T_HD_DATA	5
+#define IXGBE_I2C_T_SU_DATA	1
+#define IXGBE_I2C_T_RISE	1
+#define IXGBE_I2C_T_FALL	1
+#define IXGBE_I2C_T_SU_STO	4
+#define IXGBE_I2C_T_BUF		5
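+
+/*
+ * Note: these values are passed to usec_delay() by the bit-bang helpers in
+ * ixgbe_phy.c, i.e. they are microsecond delays (standard-mode times such
+ * as 4.7us rounded up to 5).
+ */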
+
+#ifndef IXGBE_SFP_DETECT_RETRIES
+#define IXGBE_SFP_DETECT_RETRIES	10
+#endif /* IXGBE_SFP_DETECT_RETRIES */
+#define IXGBE_TN_LASI_STATUS_REG	0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM	0x0008
+
+/* SFP+ SFF-8472 Compliance */
+#define IXGBE_SFF_SFF_8472_UNSUP	0x00
+
+#ident "$Id: ixgbe_phy.h,v 1.56 2013/09/05 23:59:49 jtkirshe Exp $"
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			   u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			    u16 phy_data);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+			       u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+				       ixgbe_link_speed speed,
+				       bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+					       ixgbe_link_speed *speed,
+					       bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+			     ixgbe_link_speed *speed,
+			     bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+				       u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+					   u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+					u16 *list_offset,
+					u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				  u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+				   u8 eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
new file mode 100644
index 0000000..e4432e2
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -0,0 +1,3858 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - IXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - IXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where a timeout occurs, a lock cannot be obtained, or data
+ * is not received within the time limit.
+ *
+ * - IXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - IXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing an
+ * operation. It is not intended for errors due to bad arguments or unsupported
+ * features. It should be used when a state occurs which prevents action but is
+ * not a serious issue.
+ *
+ * - IXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected the
+ * argument is wrong or incorrect.
+ *
+ * - IXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */
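+
+/*
+ * For illustration, the categories above are consumed by the ERROR_REPORT*
+ * macros together with a printf-style message, as used in ixgbe_phy.c in
+ * this patch, e.g.:
+ *
+ *	ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
+ *	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ *		      "eeprom read at offset %d failed",
+ *		      IXGBE_PHY_INIT_OFFSET_NL);
+ */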
+
+#include "ixgbe_osdep.h"
+
+/* Override this by setting IOMEM in your ixgbe_osdep.h header */
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID			0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598			0x10B6
+#define IXGBE_DEV_ID_82598_BX			0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT		0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT	0x10C7
+#define IXGBE_DEV_ID_82598AT			0x10C8
+#define IXGBE_DEV_ID_82598AT2			0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM		0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4		0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT	0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT		0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM	0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR		0x10F4
+#define IXGBE_DEV_ID_82599_KX4			0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ		0x1514
+#define IXGBE_DEV_ID_82599_KR			0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE	0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ	0x000C
+#define IXGBE_DEV_ID_82599_CX4			0x10F9
+#define IXGBE_DEV_ID_82599_SFP			0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP		0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0		0x1071
+#define IXGBE_SUBDEV_ID_82599_RNDC		0x1F72
+#define IXGBE_SUBDEV_ID_82599_560FLR		0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP		0x0470
+#define IXGBE_SUBDEV_ID_82599_SP_560FLR		0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SFP		0x8976
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6		0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP		0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP		0x0008
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE	0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE		0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM		0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2		0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP		0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP		0x1558
+#define IXGBE_DEV_ID_82599EN_SFP		0x1557
+#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1	0x0001
+#define IXGBE_DEV_ID_82599_XAUI_LOM		0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM		0x151C
+#define IXGBE_DEV_ID_82599_VF			0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV		0x152E
+#define IXGBE_DEV_ID_82599_LS			0x154F
+#define IXGBE_DEV_ID_X540T			0x1528
+#define IXGBE_DEV_ID_X540_VF			0x1515
+#define IXGBE_DEV_ID_X540_VF_HV			0x1530
+#define IXGBE_DEV_ID_X540T1			0x1560
+#define IXGBE_DEV_ID_X550T			0x1563
+#define IXGBE_DEV_ID_X550EM_X_KX4		0x15AA
+#define IXGBE_DEV_ID_X550EM_X_KR		0x15AB
+#define IXGBE_DEV_ID_X550EM_X_SFP		0x15AC
+#define IXGBE_DEV_ID_X550EM_X_10G_T		0x15AD
+#define IXGBE_DEV_ID_X550EM_X_1G_T		0x15AE
+#define IXGBE_DEV_ID_X550_VF_HV			0x1564
+#define IXGBE_DEV_ID_X550_VF			0x1565
+#define IXGBE_DEV_ID_X550EM_X_VF		0x15A8
+#define IXGBE_DEV_ID_X550EM_X_VF_HV		0x15A9
+
+/* General Registers */
+#define IXGBE_CTRL		0x00000
+#define IXGBE_STATUS		0x00008
+#define IXGBE_CTRL_EXT		0x00018
+#define IXGBE_ESDP		0x00020
+#define IXGBE_EODSDP		0x00028
+#define IXGBE_I2CCTL_82599	0x00028
+#define IXGBE_I2CCTL_X550	0x15F5C
+#define IXGBE_I2CCTL_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+				 IXGBE_I2CCTL_X550 : IXGBE_I2CCTL_82599))
+#define IXGBE_PHY_GPIO		0x00028
+#define IXGBE_MAC_GPIO		0x00030
+#define IXGBE_PHYINT_STATUS0	0x00100
+#define IXGBE_PHYINT_STATUS1	0x00104
+#define IXGBE_PHYINT_STATUS2	0x00108
+#define IXGBE_LEDCTL		0x00200
+#define IXGBE_FRTIMER		0x00048
+#define IXGBE_TCPTIMER		0x0004C
+#define IXGBE_CORESPARE		0x00600
+#define IXGBE_EXVET		0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC	0x10010
+#define IXGBE_EERD	0x10014
+#define IXGBE_EEWR	0x10018
+#define IXGBE_FLA	0x1001C
+#define IXGBE_EEMNGCTL	0x10110
+#define IXGBE_EEMNGDATA	0x10114
+#define IXGBE_FLMNGCTL	0x10118
+#define IXGBE_FLMNGDATA	0x1011C
+#define IXGBE_FLMNGCNT	0x10120
+#define IXGBE_FLOP	0x1013C
+#define IXGBE_GRC	0x10200
+#define IXGBE_SRAMREL	0x10210
+#define IXGBE_PHYDBG	0x10218
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG	0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME	0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0	0x10204
+#define IXGBE_VPDDIAG1	0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+					0x00004000 : 0x00000001)
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+					0x00000200 : 0x00000002)
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+					0x00001000 : 0x00000004)
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
+					0x00000400 : 0x00000008)
+#define IXGBE_I2C_BB_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+				    0x00000100 : 0)
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+					   0x00000800 : 0)
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
+					  0x00002000 : 0)
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+
+#define IXGBE_MAX_SENSORS		3
+
+struct ixgbe_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
+
+/* Interrupt Registers */
+#define IXGBE_EICR		0x00800
+#define IXGBE_EICS		0x00808
+#define IXGBE_EIMS		0x00880
+#define IXGBE_EIMC		0x00888
+#define IXGBE_EIAC		0x00810
+#define IXGBE_EIAM		0x00890
+#define IXGBE_EICS_EX(_i)	(0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i)	(0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i)	(0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i)	(0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware
+ */
+#define IXGBE_MAX_INT_RATE	488281
+#define IXGBE_MIN_INT_RATE	956
+#define IXGBE_MAX_EITR		0x00000FF8
+#define IXGBE_MIN_EITR		8
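+/*
+ * Illustration, assuming the 2.048 usec interval granularity these limits are
+ * consistent with: the minimum non-zero interval (EITR = 8, one unit) gives
+ * about 1e6 / 2.048 ~= 488281 interrupts/sec, and the maximum interval
+ * (EITR = 0xFF8, 511 units ~= 1046 usec) gives about 956 interrupts/sec.
+ */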
+#define IXGBE_EITR(_i)		(((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+				 (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK	0x00000FF8
+#define IXGBE_EITR_LLI_MOD	0x00008000
+#define IXGBE_EITR_CNT_WDIS	0x80000000
+#define IXGBE_IVAR(_i)		(0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC		0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL		0x00894
+#define IXGBE_MSIXT		0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA		0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i)	(((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE		0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL		0x03210
+#define IXGBE_FCADBUH		0x03214
+#define IXGBE_FCAMACL		0x04328
+#define IXGBE_FCAMACH		0x0432C
+#define IXGBE_FCRTH_82599(_i)	(0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i)	(0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP		0x03008
+#define IXGBE_FCTTV(_i)		(0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i)		(0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i)		(0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV		0x032A0
+#define IXGBE_FCCFG		0x03D00
+#define IXGBE_TFCS		0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i)	(((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+			 (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i)	(((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+			 (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i)	(((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+			 (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i)	(((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+			 (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i)	(((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+			 (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i)	(((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+				 (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i)	(((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+				 (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU	0x03028
+#define IXGBE_RDDCC	0x02F20
+#define IXGBE_RXMEMWRAP	0x03190
+#define IXGBE_STARCTRL	0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i)	(((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+				 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+				 (0x0D014 + (((_i) - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i)	(((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+				 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+				 (0x0D00C + (((_i) - 64) * 0x40))))
+#define IXGBE_RDRXCTL		0x02F00
+/* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
+#define IXGBE_RXCTRL		0x03000
+#define IXGBE_DROPEN		0x03D04
+#define IXGBE_RXPBSIZE_SHIFT	10
+#define IXGBE_RXPBSIZE_MASK	0x000FFC00
+
+/* Receive Registers */
+#define IXGBE_RXCSUM		0x05000
+#define IXGBE_RFCTL		0x05008
+#define IXGBE_DRECCCTL		0x02F08
+#define IXGBE_DRECCCTL_DISABLE	0
+#define IXGBE_DRECCCTL2		0x02F8C
+
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i)		(0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i)		(((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+				 (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i)		(((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+				 (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i)	(0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i)	(0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i)	(((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+				 (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i)		(0x0A000 + ((_i) * 4))
+/*array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i)	(0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL		0x05080
+#define IXGBE_VLNCTRL		0x05088
+#define IXGBE_MCSTCTRL		0x05090
+#define IXGBE_MRQC		0x05818
+#define IXGBE_SAQF(_i)	(0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i)	(0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i)	(0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i)	(0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i)	(0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i)	(0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF	0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC	0x0EC70
+#define IXGBE_MTQC	0x08120
+#define IXGBE_VLVF(_i)	(0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i)	(0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i)	(0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_PFFLPL		0x050B0
+#define IXGBE_PFFLPH		0x050B4
+#define IXGBE_VT_CTL		0x051B0
+#define IXGBE_PFMAILBOX(_i)	(0x04B00 + (4 * (_i))) /* 64 total */
+/* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBMEM(_i)	(0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR(_i)	(0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i)	(0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i)		(0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i)		(0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i)		(0x08790 + ((_i) * 4))
+#define IXGBE_QDE		0x2F04
+#define IXGBE_VMTXSW(_i)	(0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i)		(0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i)		(0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i)		(0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i)	(0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i)		(0x0F630 + ((_i) * 4))
+#define IXGBE_LVMMC_RX		0x2FA8
+#define IXGBE_LVMMC_TX		0x8108
+#define IXGBE_LMVM_RX		0x2FA4
+#define IXGBE_LMVM_TX		0x8124
+#define IXGBE_WQBR_RX(_i)	(0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i)	(0x8130 + ((_i) * 4)) /* 4 total */
+#define IXGBE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0	0x051B8
+#define IXGBE_LLITHRESH		0x0EC90
+#define IXGBE_IMIR(_i)		(0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i)	(0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIRVP		0x05AC0
+#define IXGBE_VMD_CTL		0x0581C
+#define IXGBE_RETA(_i)		(0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
+#define IXGBE_ERETA(_i)		(0x0EE80 + ((_i) * 4))  /* 96 of these (0-95) */
+#define IXGBE_RSSRK(_i)		(0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
+
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p)	(0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p)	(0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p)	(0x019000 + ((_i) * 4) + ((_p) * 0x40))
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL	0x0EE00
+#define IXGBE_FDIRHKEY	0x0EE68
+#define IXGBE_FDIRSKEY	0x0EE6C
+#define IXGBE_FDIRDIP4M	0x0EE3C
+#define IXGBE_FDIRSIP4M	0x0EE40
+#define IXGBE_FDIRTCPM	0x0EE44
+#define IXGBE_FDIRUDPM	0x0EE48
+#define IXGBE_FDIRSCTPM	0x0EE78
+#define IXGBE_FDIRIP6M	0x0EE74
+#define IXGBE_FDIRM	0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE	0x0EE38
+#define IXGBE_FDIRLEN	0x0EE4C
+#define IXGBE_FDIRUSTAT	0x0EE50
+#define IXGBE_FDIRFSTAT	0x0EE54
+#define IXGBE_FDIRMATCH	0x0EE58
+#define IXGBE_FDIRMISS	0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA	0x0EE18
+#define IXGBE_FDIRIPDA	0x0EE1C
+#define IXGBE_FDIRPORT	0x0EE20
+#define IXGBE_FDIRVLAN	0x0EE24
+#define IXGBE_FDIRHASH	0x0EE28
+#define IXGBE_FDIRCMD	0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i)		(0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
+#define IXGBE_TDBAH(_i)		(0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i)		(0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i)		(0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i)		(0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i)	(0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i)	(0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i)	(0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL		0x07E00
+
+#define IXGBE_DMATXCTL		0x04A80
+#define IXGBE_PFVFSPOOF(_i)	(0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC		0x08220
+#define IXGBE_DTXMXSZRQ		0x08100
+#define IXGBE_DTXTCPFLGL	0x04A88
+#define IXGBE_DTXTCPFLGH	0x04A8C
+#define IXGBE_LBDRPEN		0x0CA00
+#define IXGBE_TXPBTHRESH(_i)	(0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE	0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS	0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV	0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN	0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN	0x40 /* Bit 6 */
+#define IXGBE_DMATXCTL_VT_SHIFT	16  /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN	0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK		0xFF
+#define IXGBE_SPOOF_VLANAS_MASK		0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT	8
+#define IXGBE_SPOOF_ETHERTYPEAS		0xFF000000
+#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT	16
+#define IXGBE_PFVFSPOOF_REG_COUNT	8
+/* 16 of these (0-15) */
+#define IXGBE_DCA_TXCTRL(_i)		(0x07200 + ((_i) * 4))
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i)	(0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG			0x0CB00
+#define IXGBE_TXPBSIZE(_i)		(0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP			0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT	3
+#define IXGBE_TXPBSIZE_SHIFT		10
+
+/* Wake up registers */
+#define IXGBE_WUC	0x05800
+#define IXGBE_WUFC	0x05808
+#define IXGBE_WUS	0x05810
+#define IXGBE_IPAV	0x05838
+#define IXGBE_IP4AT	0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT	0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL	0x05900
+#define IXGBE_WUPM	0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_PROXYS	0x05F60 /* Proxying Status Register */
+#define IXGBE_PROXYFC	0x05F64 /* Proxying Filter Control Register */
+#define IXGBE_VXLANCTRL	0x0000507C /* Rx filter VXLAN UDPPORT Register */
+
+#define IXGBE_FHFT(_n)	(0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+/* Ext Flexible Host Filter Table */
+#define IXGBE_FHFT_EXT(_n)	(0x09800 + ((_n) * 0x100))
+#define IXGBE_FHFT_EXT_X550(_n)	(0x09600 + ((_n) * 0x100))
+
+/* Four Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX		4
+
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6	6
+/* Eight Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8	8
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX	2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX		128
+#define IXGBE_FHFT_LENGTH_OFFSET		0xFC  /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK			0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN	0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS	0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN		0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC	0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG	0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX	0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC	0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC	0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP	0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4	0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6	0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG	0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO	0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0	0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1	0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2	0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3	0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4	0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5	0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS		0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_6	0x003F0000 /* Mask for 6 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS_8	0x00FF0000 /* Mask for 8 flex filters */
+#define IXGBE_WUFC_FW_RST_WK	0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS	0x00300000
+#define IXGBE_WUFC_ALL_FILTERS		0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6	0x003F00FF /* Mask all 6 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_8	0x00FF00FF /* Mask all 8 flex filters */
+#define IXGBE_WUFC_FLX_OFFSET	16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC		IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG		IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX		IXGBE_WUFC_EX
+#define IXGBE_WUS_MC		IXGBE_WUFC_MC
+#define IXGBE_WUS_BC		IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP		IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4		IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6		IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG		IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0		IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1		IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2		IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3		IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4		IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5		IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS	IXGBE_WUFC_FLX_FILTERS
+#define IXGBE_WUS_FW_RST_WK	IXGBE_WUFC_FW_RST_WK
+/* Proxy Status */
+#define IXGBE_PROXYS_EX		0x00000004 /* Exact packet received */
+#define IXGBE_PROXYS_ARP_DIR	0x00000020 /* ARP w/filter match received */
+#define IXGBE_PROXYS_NS		0x00000200 /* IPV6 NS received */
+#define IXGBE_PROXYS_NS_DIR	0x00000400 /* IPV6 NS w/DA match received */
+#define IXGBE_PROXYS_ARP	0x00000800 /* ARP request packet received */
+#define IXGBE_PROXYS_MLD	0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define IXGBE_PROXYFC_ENABLE	0x00000001 /* Port Proxying Enable */
+#define IXGBE_PROXYFC_EX	0x00000004 /* Directed Exact Proxy Enable */
+#define IXGBE_PROXYFC_ARP_DIR	0x00000020 /* Directed ARP Proxy Enable */
+#define IXGBE_PROXYFC_NS	0x00000200 /* IPv6 Neighbor Solicitation */
+#define IXGBE_PROXYFC_ARP	0x00000800 /* ARP Request Proxy Enable */
+#define IXGBE_PROXYFC_MLD	0x00000800 /* IPv6 MLD Proxy Enable */
+#define IXGBE_PROXYFC_NO_TCO	0x00008000 /* Ignore TCO packets */
+
+#define IXGBE_WUPL_LENGTH_MASK	0xFFFF
+
+/* DCB registers */
+#define IXGBE_DCB_MAX_TRAFFIC_CLASS	8
+#define IXGBE_RMCS		0x03D00
+#define IXGBE_DPMCS		0x07F40
+#define IXGBE_PDPMCS		0x0CD00
+#define IXGBE_RUPPBMR		0x050A0
+#define IXGBE_RT2CR(_i)		(0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i)		(0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i)	(0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i)	(0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i)	(0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i)	(0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+	u16	watchdog_timer; /* usec units */
+	bool	fcoe_en;
+	u32	link_speed;
+	u8	fcoe_tc;
+	u8	num_tcs;
+};
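+/*
+ * Note (editorial sketch, not part of the base definitions): this structure
+ * is expected to be filled in by the driver and consumed by the X550 DMA
+ * coalescing helpers in base/ixgbe_x550.c (e.g. ixgbe_dmac_config_X550()),
+ * which translate watchdog_timer and the thresholds below into DMACR/DMCTH
+ * register writes.
+ */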
+
+/*
+ * DMA Coalescing threshold Rx PB TC[n] value, in kilobytes, by link speed.
+ * At 10Gbps, 10,000 bits/usec = 1250 bytes/usec; 70 usec * 1250 bytes/usec
+ * = 87500 bytes (~85KB), hence the 0x55 (85) value for 10G below.
+ */
+#define IXGBE_DMACRXT_10G		0x55
+#define IXGBE_DMACRXT_1G		0x09
+#define IXGBE_DMACRXT_100M		0x01
+
+/* DMA Coalescing registers */
+#define IXGBE_DMCMNGTH			0x15F20 /* Management Threshold */
+#define IXGBE_DMACR			0x02400 /* Control register */
+#define IXGBE_DMCTH(_i)			(0x03300 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_DMCTLX			0x02404 /* Time to Lx request */
+/* DMA Coalescing register fields */
+#define IXGBE_DMCMNGTH_DMCMNGTH_MASK	0x000FFFF0 /* Mng Threshold mask */
+#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT	4 /* Management Threshold shift */
+#define IXGBE_DMACR_DMACWT_MASK		0x0000FFFF /* Watchdog Timer mask */
+#define IXGBE_DMACR_HIGH_PRI_TC_MASK	0x00FF0000
+#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT	16
+#define IXGBE_DMACR_EN_MNG_IND		0x10000000 /* Enable Mng Indications */
+#define IXGBE_DMACR_LX_COAL_IND		0x40000000 /* Lx Coalescing indicate */
+#define IXGBE_DMACR_DMAC_EN		0x80000000 /* DMA Coalescing Enable */
+#define IXGBE_DMCTH_DMACRXT_MASK	0x000001FF /* Receive Threshold mask */
+#define IXGBE_DMCTLX_TTLX_MASK		0x00000FFF /* Time to Lx request mask */
+
+/* EEE registers */
+#define IXGBE_EEER			0x043A0 /* EEE register */
+#define IXGBE_EEE_STAT			0x04398 /* EEE Status */
+#define IXGBE_EEE_SU			0x04380 /* EEE Set up */
+#define IXGBE_TLPIC			0x041F4 /* EEE Tx LPI count */
+#define IXGBE_RLPIC			0x041F8 /* EEE Rx LPI count */
+
+/* EEE register fields */
+#define IXGBE_EEER_TX_LPI_EN		0x00010000 /* Enable EEE LPI TX path */
+#define IXGBE_EEER_RX_LPI_EN		0x00020000 /* Enable EEE LPI RX path */
+#define IXGBE_EEE_STAT_NEG		0x20000000 /* EEE support neg on link */
+#define IXGBE_EEE_RX_LPI_STATUS		0x40000000 /* RX Link in LPI status */
+#define IXGBE_EEE_TX_LPI_STATUS		0x80000000 /* TX Link in LPI status */
+
+
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL		0x08800
+#define IXGBE_SECTXSTAT		0x08804
+#define IXGBE_SECTXBUFFAF	0x08808
+#define IXGBE_SECTXMINIFG	0x08810
+#define IXGBE_SECRXCTRL		0x08D00
+#define IXGBE_SECRXSTAT		0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS	0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS		0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD	0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY	0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR	0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS	0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS		0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY	0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR	0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP		0x08A00
+#define IXGBE_LSECRXCAP		0x08F00
+#define IXGBE_LSECTXCTRL	0x08A04
+#define IXGBE_LSECTXSCL		0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH		0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA		0x08A10
+#define IXGBE_LSECTXPN0		0x08A14
+#define IXGBE_LSECTXPN1		0x08A18
+#define IXGBE_LSECTXKEY0(_n)	(0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n)	(0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL	0x08F04
+#define IXGBE_LSECRXSCL		0x08F08
+#define IXGBE_LSECRXSCH		0x08F0C
+#define IXGBE_LSECRXSA(_i)	(0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i)	(0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m)	(0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT		0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE	0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP	0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE	0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP	0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT		0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD	0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV	0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD		0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI	0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI	0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH	0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY	0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE	0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n)	(0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n)	(0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n)	(0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA	0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA	0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK	0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT	16
+#define IXGBE_LSECRXCAP_SUM_MASK	0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT	16
+
+#define IXGBE_LSECTXCTRL_EN_MASK	0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE	0x0
+#define IXGBE_LSECTXCTRL_AUTH		0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT	0x2
+#define IXGBE_LSECTXCTRL_AISCI		0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK	0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK	0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK	0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT	2
+#define IXGBE_LSECRXCTRL_DISABLE	0x0
+#define IXGBE_LSECRXCTRL_CHECK		0x1
+#define IXGBE_LSECRXCTRL_STRICT		0x2
+#define IXGBE_LSECRXCTRL_DROP		0x3
+#define IXGBE_LSECRXCTRL_PLSH		0x00000040
+#define IXGBE_LSECRXCTRL_RP		0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK	0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX		0x08900
+#define IXGBE_IPSTXSALT		0x08904
+#define IXGBE_IPSTXKEY(_i)	(0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX		0x08E00
+#define IXGBE_IPSRXIPADDR(_i)	(0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI		0x08E14
+#define IXGBE_IPSRXIPIDX	0x08E18
+#define IXGBE_IPSRXKEY(_i)	(0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT		0x08E2C
+#define IXGBE_IPSRXMOD		0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE	0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS		0x02430
+#define IXGBE_RTTDCS		0x04900
+#define IXGBE_RTTDCS_ARBDIS	0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS		0x0CD00
+#define IXGBE_RTRUP2TC		0x03020
+#define IXGBE_RTTUP2TC		0x0C800
+#define IXGBE_RTRPT4C(_i)	(0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i)		(0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i)	(0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i)	(0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i)	(0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i)	(0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i)	(0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL		0x04904
+#define IXGBE_RTTDT1C		0x04908
+#define IXGBE_RTTDT1S		0x0490C
+#define IXGBE_RTTDTECC		0x04990
+#define IXGBE_RTTDTECC_NO_BCN	0x00000100
+
+#define IXGBE_RTTBCNRC			0x04984
+#define IXGBE_RTTBCNRC_RS_ENA		0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM	0x04980
+
+/* BCN (for DCB) Registers */
+#define IXGBE_RTTBCNRS	0x04988
+#define IXGBE_RTTBCNCR	0x08B00
+#define IXGBE_RTTBCNACH	0x08B04
+#define IXGBE_RTTBCNACL	0x08B08
+#define IXGBE_RTTBCNTG	0x04A90
+#define IXGBE_RTTBCNIDX	0x08B0C
+#define IXGBE_RTTBCNCP	0x08B10
+#define IXGBE_RTFRTIMER	0x08B14
+#define IXGBE_RTTBCNRTT	0x05150
+#define IXGBE_RTTBCNRD	0x0498C
+
+
+/* FCoE DMA Context Registers */
+/* FCoE Direct DMA Context */
+#define IXGBE_FCDDC(_i, _j)	(0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCPTRL		0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH		0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF		0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW		0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCBUFF_VALID	(1 << 0)   /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE	(3 << 3)   /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX	(1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT	0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET	0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT	3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT	8
+#define IXGBE_FCBUFF_OFFSET_SHIFT	16
+#define IXGBE_FCDMARW_WE		(1 << 14)   /* Write enable */
+#define IXGBE_FCDMARW_RE		(1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL		0x000001ff  /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE		0xffff0000  /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT	16
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF		0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF		0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF		0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF		0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCD_ID		0x05114 /* FCoE D_ID */
+#define IXGBE_FCSMAC		0x0510C /* FCoE Source MAC */
+#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT	16
+/* FCoE Direct Filter Context */
+#define IXGBE_FCDFC(_i, _j)	(0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
+#define IXGBE_FCDFCD(_i)	(0x30000 + ((_i) * 0x4))
+#define IXGBE_FCFLT		0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW		0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM		0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID	(1 << 0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST	(1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_SEQID	0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT	0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT	(1 << 13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE	(1 << 14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE	(1 << 15)  /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL		0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI	(1 << 0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD	(1 << 1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH	(1 << 2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH	(1 << 3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH	(1 << 4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH	(1 << 5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC	(1 << 6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO	(1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER	0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT	8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL		0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0		0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i)	(IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA	0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA	0x2 /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE	8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK	0x0000007f /* 7 bits for the queue index */
+#define IXGBE_FCRETA_SIZE_X550	32 /* Max entries in FCRETA */
+/* Higher 7 bits for the queue index */
+#define IXGBE_FCRETA_ENTRY_HIGH_MASK	0x007F0000
+#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT	16
+
+/* Stats registers */
+#define IXGBE_CRCERRS	0x04000
+#define IXGBE_ILLERRC	0x04004
+#define IXGBE_ERRBC	0x04008
+#define IXGBE_MSPDC	0x04010
+#define IXGBE_MPC(_i)	(0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC	0x04034
+#define IXGBE_MRFC	0x04038
+#define IXGBE_RLEC	0x04040
+#define IXGBE_LXONTXC	0x03F60
+#define IXGBE_LXONRXC	0x0CF60
+#define IXGBE_LXOFFTXC	0x03F68
+#define IXGBE_LXOFFRXC	0x0CF68
+#define IXGBE_LXONRXCNT		0x041A4
+#define IXGBE_LXOFFRXCNT	0x041A8
+#define IXGBE_PXONRXCNT(_i)	(0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i)	(0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i)	(0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i)	(0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i)	(0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i)	(0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i)	(0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64		0x0405C
+#define IXGBE_PRC127		0x04060
+#define IXGBE_PRC255		0x04064
+#define IXGBE_PRC511		0x04068
+#define IXGBE_PRC1023		0x0406C
+#define IXGBE_PRC1522		0x04070
+#define IXGBE_GPRC		0x04074
+#define IXGBE_BPRC		0x04078
+#define IXGBE_MPRC		0x0407C
+#define IXGBE_GPTC		0x04080
+#define IXGBE_GORCL		0x04088
+#define IXGBE_GORCH		0x0408C
+#define IXGBE_GOTCL		0x04090
+#define IXGBE_GOTCH		0x04094
+#define IXGBE_RNBC(_i)		(0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC		0x040A4
+#define IXGBE_RFC		0x040A8
+#define IXGBE_ROC		0x040AC
+#define IXGBE_RJC		0x040B0
+#define IXGBE_MNGPRC		0x040B4
+#define IXGBE_MNGPDC		0x040B8
+#define IXGBE_MNGPTC		0x0CF90
+#define IXGBE_TORL		0x040C0
+#define IXGBE_TORH		0x040C4
+#define IXGBE_TPR		0x040D0
+#define IXGBE_TPT		0x040D4
+#define IXGBE_PTC64		0x040D8
+#define IXGBE_PTC127		0x040DC
+#define IXGBE_PTC255		0x040E0
+#define IXGBE_PTC511		0x040E4
+#define IXGBE_PTC1023		0x040E8
+#define IXGBE_PTC1522		0x040EC
+#define IXGBE_MPTC		0x040F0
+#define IXGBE_BPTC		0x040F4
+#define IXGBE_XEC		0x04120
+#define IXGBE_SSVPC		0x08780
+
+#define IXGBE_RQSMR(_i)	(0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i)	(((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+			 (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i)	(0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i)	(0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i)	(0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i)	(0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i)	(0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i)	(0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i)	(0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i)		(0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i)	(0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i)	(0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC		0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC		0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST		0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC		0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC		0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC		0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC		0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK	0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK	0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC		0x041C4
+#define IXGBE_O2BSPC		0x087B0
+#define IXGBE_B2OSPC		0x041C0
+#define IXGBE_B2OGPRC		0x02F90
+#define IXGBE_BUPRC		0x04180
+#define IXGBE_BMPRC		0x04184
+#define IXGBE_BBPRC		0x04188
+#define IXGBE_BUPTC		0x0418C
+#define IXGBE_BMPTC		0x04190
+#define IXGBE_BBPTC		0x04194
+#define IXGBE_BCRCERRS		0x04198
+#define IXGBE_BXONRXC		0x0419C
+#define IXGBE_BXOFFRXC		0x041E0
+#define IXGBE_BXONTXC		0x041E4
+#define IXGBE_BXOFFTXC		0x041E8
+
+/* Management */
+#define IXGBE_MAVTV(_i)		(0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i)		(0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC		0x05820
+#define IXGBE_MFVAL		0x05824
+#define IXGBE_MANC2H		0x05860
+#define IXGBE_MDEF(_i)		(0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF		0x058B0
+#define IXGBE_MMAL(_i)		(0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i)		(0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT		0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i)		(0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i)	(0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW		0x15014
+#define IXGBE_BMCIP(_i)		(0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL		0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE	0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID	0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_MPROXYE	0x40000000 /* Management Proxy Enable */
+#define IXGBE_MANC_RCV_TCO_EN	0x00020000 /* Rcv TCO packet enable */
+#define IXGBE_MANC_EN_BMC2OS	0x10000000 /* Ena BMC2OS and OS2BMC traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT	28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK	0xE
+#define IXGBE_FWSM_TS_ENABLED	0x1
+#define IXGBE_FWSM_FW_MODE_PT	0x4
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR		0x15F00
+#define IXGBE_FWSTS		0x15F0C
+#define IXGBE_HSMC0R		0x15F04
+#define IXGBE_HSMC1R		0x15F08
+#define IXGBE_SWSR		0x15F10
+#define IXGBE_HFDR		0x15FE8
+#define IXGBE_FLEX_MNG		0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN		0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C		0x02
+#define IXGBE_HICR_SV		0x04  /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE	0x40
+#define IXGBE_HICR_FW_RESET	0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR		0x11000
+#define IXGBE_GTV		0x11004
+#define IXGBE_FUNCTAG		0x11008
+#define IXGBE_GLT		0x1100C
+#define IXGBE_PCIEPIPEADR	0x11004
+#define IXGBE_PCIEPIPEDAT	0x11008
+#define IXGBE_GSCL_1		0x11010
+#define IXGBE_GSCL_2		0x11014
+#define IXGBE_GSCL_3		0x11018
+#define IXGBE_GSCL_4		0x1101C
+#define IXGBE_GSCN_0		0x11020
+#define IXGBE_GSCN_1		0x11024
+#define IXGBE_GSCN_2		0x11028
+#define IXGBE_GSCN_3		0x1102C
+#define IXGBE_FACTPS		0x10150
+#define IXGBE_PCIEANACTL	0x11040
+#define IXGBE_SWSM		0x10140
+#define IXGBE_FWSM		0x10148
+#define IXGBE_GSSR		0x10160
+#define IXGBE_MREVID		0x11064
+#define IXGBE_DCA_ID		0x11070
+#define IXGBE_DCA_CTRL		0x11074
+#define IXGBE_SWFW_SYNC		IXGBE_GSSR
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT		0x11050
+#define IXGBE_GSCL_5_82599	0x11030
+#define IXGBE_GSCL_6_82599	0x11034
+#define IXGBE_GSCL_7_82599	0x11038
+#define IXGBE_GSCL_8_82599	0x1103C
+#define IXGBE_PHYADR_82599	0x11040
+#define IXGBE_PHYDAT_82599	0x11044
+#define IXGBE_PHYCTL_82599	0x11048
+#define IXGBE_PBACLR_82599	0x11068
+#define IXGBE_CIAA_82599	0x11088
+#define IXGBE_CIAD_82599	0x1108C
+#define IXGBE_CIAA_X550		0x11508
+#define IXGBE_CIAD_X550		0x11510
+#define IXGBE_CIAA_BY_MAC(_hw)	((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+				 IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
+#define IXGBE_CIAD_BY_MAC(_hw)	((((_hw)->mac.type >= ixgbe_mac_X550) ? \
+				 IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+#define IXGBE_PICAUSE		0x110B0
+#define IXGBE_PIENA		0x110B8
+#define IXGBE_CDQ_MBR_82599	0x110B4
+#define IXGBE_PCIESPARE		0x110BC
+#define IXGBE_MISC_REG_82599	0x110F0
+#define IXGBE_ECC_CTRL_0_82599	0x11100
+#define IXGBE_ECC_CTRL_1_82599	0x11104
+#define IXGBE_ECC_STATUS_82599	0x110E0
+#define IXGBE_BAR_CTRL_82599	0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK	0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms	0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND	0x00010000
+#define IXGBE_GCR_CAP_VER2		0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN		0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR	0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16	0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32	0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64	0x00000003
+#define IXGBE_GCR_EXT_SRIOV		(IXGBE_GCR_EXT_MSIX_EN | \
+					 IXGBE_GCR_EXT_VT_MODE_64)
+#define IXGBE_GCR_EXT_VT_MODE_MASK	0x00000003
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL	0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL	0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL	0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH	0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL	0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH	0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL	0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL	0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH	0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML	0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH	0x08C10 /* System time register High - RO */
+#define IXGBE_SYSTIMR	0x08C58 /* System time register Residue - RO */
+#define IXGBE_TIMINCA	0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL	0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH	0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC	0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0	0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0	0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1	0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1	0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML	0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH	0x08C38 /* Clock Out Time Register High - RW */
+#define IXGBE_FREQOUT0	0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1	0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0	0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0	0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1	0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1	0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+#define IXGBE_TSIM	0x08C68 /* TimeSync Interrupt Mask Register - RW */
+#define IXGBE_TSICR	0x08C60 /* TimeSync Interrupt Cause Register - WO */
+#define IXGBE_TSSDP	0x0003C /* TimeSync SDP Configuration Register - RW */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL		0x02C20
+#define IXGBE_RDSTAT(_i)	(0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN		0x02F08
+#define IXGBE_RIC_DW(_i)	(0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE		0x02F20
+#define IXGBE_RDMAM		0x02F30
+#define IXGBE_RDMAD		0x02F34
+#define IXGBE_TDHMPN		0x07F08
+#define IXGBE_TDHMPN2		0x082FC
+#define IXGBE_TXDESCIC		0x082CC
+#define IXGBE_TIC_DW(_i)	(0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i)	(0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE		0x07F20
+#define IXGBE_TXBUFCTRL		0x0C600
+#define IXGBE_TXBUFDATA0	0x0C610
+#define IXGBE_TXBUFDATA1	0x0C614
+#define IXGBE_TXBUFDATA2	0x0C618
+#define IXGBE_TXBUFDATA3	0x0C61C
+#define IXGBE_RXBUFCTRL		0x03600
+#define IXGBE_RXBUFDATA0	0x03610
+#define IXGBE_RXBUFDATA1	0x03614
+#define IXGBE_RXBUFDATA2	0x03618
+#define IXGBE_RXBUFDATA3	0x0361C
+#define IXGBE_PCIE_DIAG(_i)	(0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL		0x050A4
+#define IXGBE_MDFTC1		0x042B8
+#define IXGBE_MDFTC2		0x042C0
+#define IXGBE_MDFTFIFO1		0x042C4
+#define IXGBE_MDFTFIFO2		0x042C8
+#define IXGBE_MDFTS		0x042CC
+#define IXGBE_RXDATAWRPTR(_i)	(0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i)	(0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i)	(0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i)	(0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i)	(0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i)	(0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i)	(0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i)	(0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL	0x1106C
+#define IXGBE_RXWRPTR(_i)	(0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i)	(0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i)	(0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i)	(0x03160 + ((_i) * 4)) /* 8 of these 3160-317C*/
+#define IXGBE_TXWRPTR(_i)	(0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i)	(0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i)	(0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i)	(0x0C160 + ((_i) * 4)) /* 8 of these C160-C17C*/
+#define IXGBE_PCIEECCCTL0	0x11100
+#define IXGBE_PCIEECCCTL1	0x11104
+#define IXGBE_RXDBUECC		0x03F70
+#define IXGBE_TXDBUECC		0x0CF70
+#define IXGBE_RXDBUEST		0x03F74
+#define IXGBE_TXDBUEST		0x0CF74
+#define IXGBE_PBTXECC		0x0C300
+#define IXGBE_PBRXECC		0x03300
+#define IXGBE_GHECCR		0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG		0x04200
+#define IXGBE_PCS1GLCTL		0x04208
+#define IXGBE_PCS1GLSTA		0x0420C
+#define IXGBE_PCS1GDBG0		0x04210
+#define IXGBE_PCS1GDBG1		0x04214
+#define IXGBE_PCS1GANA		0x04218
+#define IXGBE_PCS1GANLP		0x0421C
+#define IXGBE_PCS1GANNP		0x04220
+#define IXGBE_PCS1GANLPNP	0x04224
+#define IXGBE_HLREG0		0x04240
+#define IXGBE_HLREG1		0x04244
+#define IXGBE_PAP		0x04248
+#define IXGBE_MACA		0x0424C
+#define IXGBE_APAE		0x04250
+#define IXGBE_ARD		0x04254
+#define IXGBE_AIS		0x04258
+#define IXGBE_MSCA		0x0425C
+#define IXGBE_MSRWD		0x04260
+#define IXGBE_MLADD		0x04264
+#define IXGBE_MHADD		0x04268
+#define IXGBE_MAXFRS		0x04268
+#define IXGBE_TREG		0x0426C
+#define IXGBE_PCSS1		0x04288
+#define IXGBE_PCSS2		0x0428C
+#define IXGBE_XPCSS		0x04290
+#define IXGBE_MFLCN		0x04294
+#define IXGBE_SERDESC		0x04298
+#define IXGBE_MACS		0x0429C
+#define IXGBE_AUTOC		0x042A0
+#define IXGBE_LINKS		0x042A4
+#define IXGBE_LINKS2		0x04324
+#define IXGBE_AUTOC2		0x042A8
+#define IXGBE_AUTOC3		0x042AC
+#define IXGBE_ANLP1		0x042B0
+#define IXGBE_ANLP2		0x042B4
+#define IXGBE_MACC		0x04330
+#define IXGBE_ATLASCTL		0x04800
+#define IXGBE_MMNGC		0x042D0
+#define IXGBE_ANLPNP1		0x042D4
+#define IXGBE_ANLPNP2		0x042D8
+#define IXGBE_KRPCSFC		0x042E0
+#define IXGBE_KRPCSS		0x042E4
+#define IXGBE_FECS1		0x042E8
+#define IXGBE_FECS2		0x042EC
+#define IXGBE_SMADARCTL		0x14F10
+#define IXGBE_MPVC		0x04318
+#define IXGBE_SGMIIC		0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC		0x041B0
+#define IXGBE_RXNFGBCL		0x041B4
+#define IXGBE_RXNFGBCH		0x041B8
+#define IXGBE_RXDGPC		0x02F50
+#define IXGBE_RXDGBCL		0x02F54
+#define IXGBE_RXDGBCH		0x02F58
+#define IXGBE_RXDDGPC		0x02F5C
+#define IXGBE_RXDDGBCL		0x02F60
+#define IXGBE_RXDDGBCH		0x02F64
+#define IXGBE_RXLPBKGPC		0x02F68
+#define IXGBE_RXLPBKGBCL	0x02F6C
+#define IXGBE_RXLPBKGBCH	0x02F70
+#define IXGBE_RXDLPBKGPC	0x02F74
+#define IXGBE_RXDLPBKGBCL	0x02F78
+#define IXGBE_RXDLPBKGBCH	0x02F7C
+#define IXGBE_TXDGPC		0x087A0
+#define IXGBE_TXDGBCL		0x087A4
+#define IXGBE_TXDGBCH		0x087A8
+
+#define IXGBE_RXDSTATCTRL	0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL			0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL			0x110F4
+#define IXGBE_BARCTRL_FLSIZE		0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT	8
+#define IXGBE_BARCTRL_CSRSIZE		0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN	0x01
+#define IXGBE_RSCCTL_MAXDESC_1	0x00
+#define IXGBE_RSCCTL_MAXDESC_4	0x04
+#define IXGBE_RSCCTL_MAXDESC_8	0x08
+#define IXGBE_RSCCTL_MAXDESC_16	0x0C
+#define IXGBE_RSCCTL_TS_DIS	0x02
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK	0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS		0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2		0x00000000 /* Rx Desc Min THLD Size */
+#define IXGBE_RDRXCTL_CRCSTRIP		0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP		0x00000004 /* Pad Small Packet */
+#define IXGBE_RDRXCTL_MVMEN		0x00000020
+#define IXGBE_RDRXCTL_RSC_PUSH_DIS	0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE		0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_RSC_PUSH		0x00000080
+#define IXGBE_RDRXCTL_AGGDIS		0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE	0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS		0x00800000 /* Disable RSC compl on LLI*/
+#define IXGBE_RDRXCTL_RSCACKC		0x02000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_FCOE_WRFIX	0x04000000 /* must set 1 when RSC ena */
+#define IXGBE_RDRXCTL_MBINTEN		0x10000000
+#define IXGBE_RDRXCTL_MDP_EN		0x20000000
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i)	((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK	(0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK	(0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK	(0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK	(0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK	(0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK	(0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK	(0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK	(0x7 << 28)
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK		0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT	29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS	0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST	0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST		0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK	(IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_MNGCG	0x20000000 /* Manageability Clock Gated */
+#define IXGBE_FACTPS_LFS	0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK	0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT	16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD	0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS	0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS	0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD	0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE	0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE	0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1	0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2	0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK	0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599	0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599	24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK	0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599	0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599	24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_MAX_QUEUES_82598	16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK		0x0000FFFF /* MDI Addr (new prot) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT	0
+#define IXGBE_MSCA_DEV_TYPE_MASK	0x001F0000 /* Dev Type (new prot) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT	16 /* Register Address (old prot) */
+#define IXGBE_MSCA_PHY_ADDR_MASK	0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT	21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK		0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT	26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE		0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE		0x04000000 /* OP CODE 01 (wr) */
+#define IXGBE_MSCA_READ			0x0C000000 /* OP CODE 11 (rd) */
+#define IXGBE_MSCA_READ_AUTOINC		0x08000000 /* OP CODE 10 (rd auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK		0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT	28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL		0x00000000 /* ST CODE 00 (new prot) */
+#define IXGBE_MSCA_OLD_PROTOCOL		0x10000000 /* ST CODE 01 (old prot) */
+#define IXGBE_MSCA_MDI_COMMAND		0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN	0x80000000 /* MDI in progress ena */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK	0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT	0
+#define IXGBE_MSRWD_READ_DATA_MASK	0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT	16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK		0x24
+#define IXGBE_ATLAS_PDN_10G		0xB
+#define IXGBE_ATLAS_PDN_1G		0xC
+#define IXGBE_ATLAS_PDN_AN		0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD	0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN	0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL	0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL	0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL	0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD		0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE		0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE			0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE		0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE		0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE	0x1E   /* Device 30 */
+#define IXGBE_TWINAX_DEV			1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT	100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL		0x0 /* VS1 Ctrl Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS		0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS	0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS	0x0010 /* 0-10G, 1-1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED		0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED		0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL	0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS	0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT	0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT	0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP		0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT	0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT	0x8  /* AUTO NEG EEE 10GBaseT Advt */
+#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4  /* AUTO NEG EEE 1000BaseT Advt */
+#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT	0x2  /* AUTO NEG EEE 100BaseT Advt */
+#define IXGBE_MDIO_PHY_XS_CONTROL	0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET		0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH		0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW		0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY	0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G	0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G		0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M	0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY	0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY		0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY	0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY	0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE	0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3		0xCC02 /* Vendor Alarms 3 Reg */
+#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK	0x3 /* PHY Reset Complete Mask */
+#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
+#define IXGBE_MDIO_POWER_UP_STALL		0x8000 /* Power Up Stall */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR	0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR	0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA	0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT	0xC30C /* PHY_XS SDA/SCL Status Reg */
+#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
+
+#define IXGBE_PCRC8ECL		0x0E810 /* PCR CRC-8 Error Count Lo */
+#define IXGBE_PCRC8ECH		0x0E811 /* PCR CRC-8 Error Count Hi */
+#define IXGBE_PCRC8ECH_MASK	0x1F
+#define IXGBE_LDPCECL		0x0E820 /* PCR Uncorrected Error Count Lo */
+#define IXGBE_LDPCECH		0x0E821 /* PCR Uncorrected Error Count Hi */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE	0x0800
+
+#define IXGBE_MDIO_XENPAK_LASI_STATUS		0x9005 /* XENPAK LASI Status register*/
+#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM	0x1 /* Link Status Alarm change */
+
+#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS		0x4 /* Indicates if link is up */
+
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK		0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF	0x0 /* 10Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL	0x1 /* 10Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF	0x2 /* 100Mb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL	0x3 /* 100Mb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF	0x4 /* 1Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL	0x5 /* 1Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF	0x6 /* 10Gb/s Half Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL	0x7 /* 10Gb/s Full Duplex */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG	0x20   /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG		0x17   /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG		0x10   /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE		0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX	0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE		0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE		0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE		0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE		0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF	0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART			0x200
+#define IXGBE_MII_AUTONEG_COMPLETE		0x20
+#define IXGBE_MII_AUTONEG_LINK_UP		0x04
+#define IXGBE_MII_AUTONEG_REG			0x0
+
+#define IXGBE_PHY_REVISION_MASK		0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR		32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID	0x00A19410
+#define TNX_FW_REV	0xB
+#define X540_PHY_ID	0x01540200
+#define X550_PHY_ID	0x01540220
+#define X557_PHY_ID	0x01540240
+#define AQ_FW_REV	0x20
+#define QT2022_PHY_ID	0x0043A400
+#define ATH_PHY_ID	0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID	0x01410CD0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL	0x002B
+#define IXGBE_PHY_INIT_END_NL		0xFFFF
+#define IXGBE_CONTROL_MASK_NL		0xF000
+#define IXGBE_DATA_MASK_NL		0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL		12
+#define IXGBE_DELAY_NL			0
+#define IXGBE_DATA_NL			1
+#define IXGBE_CONTROL_NL		0x000F
+#define IXGBE_CONTROL_EOL_NL		0x0FFF
+#define IXGBE_CONTROL_SOL_NL		0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN	0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN	0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN	0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540	0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540	0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540	0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+				      IXGBE_SDP0_GPIEN_X540 : IXGBE_SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+				      IXGBE_SDP1_GPIEN_X540 : IXGBE_SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
+				      IXGBE_SDP2_GPIEN_X540 : IXGBE_SDP2_GPIEN)
+#define IXGBE_GPIE_MSIX_MODE	0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD		0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN	0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME	0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT	0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT	11
+#define IXGBE_GPIE_VTMODE_MASK	0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16	0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32	0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64	0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS	8
+
+#define IXGBE_TXPBSIZE_20KB	0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB	0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB	0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB	0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB	0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB	0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX	0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX	0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX	0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB		8
+
+/* Packet buffer allocation strategies */
+enum {
+	PBA_STRATEGY_EQUAL	= 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL	PBA_STRATEGY_EQUAL
+	PBA_STRATEGY_WEIGHTED	= 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED	PBA_STRATEGY_WEIGHTED
+};
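+/*
+ * Usage sketch (assumption, not part of the base definitions): these
+ * strategies are passed to the packet-buffer setup helper in
+ * base/ixgbe_common.c, e.g.
+ *
+ *	ixgbe_set_rxpba_generic(hw, num_pb, headroom, PBA_STRATEGY_EQUAL);
+ */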
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF	0x00000001
+#define IXGBE_TFCS_TXOFF0	0x00000100
+#define IXGBE_TFCS_TXOFF1	0x00000200
+#define IXGBE_TFCS_TXOFF2	0x00000400
+#define IXGBE_TFCS_TXOFF3	0x00000800
+#define IXGBE_TFCS_TXOFF4	0x00001000
+#define IXGBE_TFCS_TXOFF5	0x00002000
+#define IXGBE_TFCS_TXOFF6	0x00004000
+#define IXGBE_TFCS_TXOFF7	0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS		0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE	0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH	0x00000400
+#define IXGBE_TCPTIMER_LOOP		0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK	0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN		0x00000001 /* bit  0 */
+#define IXGBE_HLREG0_RXCRCSTRP		0x00000002 /* bit  1 */
+#define IXGBE_HLREG0_JUMBOEN		0x00000004 /* bit  2 */
+#define IXGBE_HLREG0_TXPADEN		0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN		0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN		0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK		0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD		0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC		0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR		0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND		0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN		0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA	0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN	0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN	0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN		0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER	0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL		0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN		0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE		0x00000001  /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT		7
+#define IXGBE_VT_CTL_POOL_MASK		(0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_AUPE	0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE	0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE	0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM		0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE		0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL	0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR		0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ		0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT	11
+#define IXGBE_TDHMPN_TDICADDR		0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ		0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT	11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT		13
+#define IXGBE_RDMAM_DWORD_SHIFT			9
+#define IXGBE_RDMAM_DESC_COMP_FIFO		1
+#define IXGBE_RDMAM_DFC_CMD_FIFO		2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR		3
+#define IXGBE_RDMAM_TCN_STATUS_RAM		4
+#define IXGBE_RDMAM_WB_COLL_FIFO		5
+#define IXGBE_RDMAM_QSC_CNT_RAM			6
+#define IXGBE_RDMAM_QSC_FCOE_RAM		7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT		8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM		0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM			0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE		135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT		4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE		48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT		7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE	32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT	4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE	256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT	9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE		8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT		4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE		64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT		4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE		512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT		5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE		32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT		4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE		128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT		8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE		32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT		8
+
+#define IXGBE_TXDESCIC_READY	0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE	0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD	0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE	0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN	0x80000000 /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK	0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM			0x00000002 /* Rx Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC			0x00000004
+/* Deficit Fixed Prio ena */
+#define IXGBE_RMCS_DFP			IXGBE_RMCS_RAC
+#define IXGBE_RMCS_TFCE_802_3X		0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY	0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS		0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X		0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY	0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE	0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR	0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS	0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI		0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX	0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC		0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC	0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG		0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS		0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC	0x01000000 /* Timesync Event */
+#define IXGBE_EICR_GPI_SDP0	0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1	0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2	0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC		0x10000000 /* ECC Error */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
+					 IXGBE_EICR_GPI_SDP0_X540 : \
+					 IXGBE_EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
+					 IXGBE_EICR_GPI_SDP1_X540 : \
+					 IXGBE_EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
+					 IXGBE_EICR_GPI_SDP2_X540 : \
+					 IXGBE_EICR_GPI_SDP2)
+#define IXGBE_EICR_PBUR		0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER		0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER	0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER	0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR	IXGBE_EICR_FLOW_DIR  /* FDir Exception */
+#define IXGBE_EICS_RX_MISS	IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI		IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EICS_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2	IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC		IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EICS_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER		IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR	IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS	IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI		IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS		IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMS_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2	IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC		IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EIMS_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER		IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR	IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS	IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI		IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX	IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
+#define IXGBE_EIMC_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2	IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC		IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
+#define IXGBE_EIMC_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER		IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+				IXGBE_EIMS_RTX_QUEUE	| \
+				IXGBE_EIMS_LSC		| \
+				IXGBE_EIMS_TCP_TIMER	| \
+				IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN	0x00010000  /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP	0x00020000  /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP	0x00001000  /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG	0x00002000  /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK	0x00004000  /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH	0x00008000  /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST	0x00010000  /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN	0x00020000  /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN	0x00040000  /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP	0x00080000  /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599	0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599	0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599	0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599	0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599	0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599	0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599	0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599	0x00080000 /* Bypass chk of ctrl bits */
+#define IXGBE_IMIR_LLI_EN_82599		0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599	0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599	21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK	0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN	0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS		128
+#define IXGBE_FTQF_PROTOCOL_MASK	0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP		0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP		0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP	2
+#define IXGBE_FTQF_PRIORITY_MASK	0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT	2
+#define IXGBE_FTQF_POOL_MASK		0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT		8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK	0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT	25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK	0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK	0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK	0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK	0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK	0x0F
+#define IXGBE_FTQF_POOL_MASK_EN		0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE		0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK	0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM		25
+#define IXGBE_IVAR_REG_NUM_82599	64
+#define IXGBE_IVAR_TXRX_ENTRY		96
+#define IXGBE_IVAR_RX_ENTRY		64
+#define IXGBE_IVAR_RX_QUEUE(_i)		(0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i)		(64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY		32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX	96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX	97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i)		(0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL		0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS		8
+#define IXGBE_ETQF_FCOE			0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN			0x10000000 /* bit 28 */
+#define IXGBE_ETQF_TX_ANTISPOOF		0x20000000 /* bit 29 */
+#define IXGBE_ETQF_1588			0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN		0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE		(1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT		20
+
+#define IXGBE_ETQS_RX_QUEUE		0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT	16
+#define IXGBE_ETQS_LLI			0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN		0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ *		   to avoid filter collisions later. Add new filters
+ *		   here!!
+ *
+ * Current filters:
+ *	EAPOL 802.1x (0x888e): Filter 0
+ *	FCoE (0x8906):	 Filter 2
+ *	1588 (0x88f7):	 Filter 3
+ *	FIP  (0x8914):	 Filter 4
+ *	LLDP (0x88CC):	 Filter 5
+ *	LACP (0x8809):	 Filter 6
+ */
+#define IXGBE_ETQF_FILTER_EAPOL		0
+#define IXGBE_ETQF_FILTER_FCOE		2
+#define IXGBE_ETQF_FILTER_1588		3
+#define IXGBE_ETQF_FILTER_FIP		4
+#define IXGBE_ETQF_FILTER_LLDP		5
+#define IXGBE_ETQF_FILTER_LACP		6
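+
+/*
+ * Illustrative sketch only (not part of the base driver code): programming
+ * the reserved 1588 slot so PTP frames (EtherType 0x88F7) are steered to a
+ * chosen Rx queue.  "hw" and "rx_queue" are placeholders; IXGBE_ETQF() /
+ * IXGBE_ETQS() are the register offset macros defined earlier in this file
+ * and IXGBE_WRITE_REG() comes from ixgbe_osdep.h.
+ *
+ *	u32 etqf = IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588 | 0x88F7;
+ *	u32 etqs = IXGBE_ETQS_QUEUE_EN |
+ *		   ((u32)rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT);
+ *
+ *	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), etqf);
+ *	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588), etqs);
+ */
+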
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET		0x0000FFFF  /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI		0x10000000  /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN		0x20000000  /* bit 29 */
+#define IXGBE_VLNCTRL_VFE		0x40000000  /* bit 30 */
+#define IXGBE_VLNCTRL_VME		0x80000000  /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN			0x80000000  /* filter is valid */
+#define IXGBE_VLVF_ENTRIES		64
+#define IXGBE_VLVF_VLANID_MASK		0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT	0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER		0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE	0x8100  /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID		0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT	2 /* LAN ID Shift */
+#define IXGBE_STATUS_GIO		0x00080000 /* GIO Master Ena Status */
+
+#define IXGBE_STATUS_LAN_ID_0	0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1	0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0		0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1		0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2		0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3		0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4		0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5		0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6		0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP7		0x00000080 /* SDP7 Data Value */
+#define IXGBE_ESDP_SDP0_DIR	0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR	0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR	0x00000400 /* SDP2 IO direction */
+#define IXGBE_ESDP_SDP3_DIR	0x00000800 /* SDP3 IO direction */
+#define IXGBE_ESDP_SDP4_DIR	0x00001000 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR	0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP6_DIR	0x00004000 /* SDP6 IO direction */
+#define IXGBE_ESDP_SDP7_DIR	0x00008000 /* SDP7 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE	0x00010000 /* SDP0 IO mode */
+#define IXGBE_ESDP_SDP1_NATIVE	0x00020000 /* SDP1 IO mode */
+
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE		0x00000040
+#define IXGBE_LED_BLINK_BASE		0x00000080
+#define IXGBE_LED_MODE_MASK_BASE	0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i)	(_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i)	(8*(_i))
+#define IXGBE_LED_IVRT(_i)	IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i)	IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i)	IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP	0x0
+#define IXGBE_LED_LINK_10G	0x1
+#define IXGBE_LED_MAC		0x2
+#define IXGBE_LED_FILTER	0x3
+#define IXGBE_LED_LINK_ACTIVE	0x4
+#define IXGBE_LED_LINK_1G	0x5
+#define IXGBE_LED_ON		0xE
+#define IXGBE_LED_OFF		0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP	0x80000000
+#define IXGBE_AUTOC_KX_SUPP	0x40000000
+#define IXGBE_AUTOC_PAUSE	0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE	0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE	0x10000000
+#define IXGBE_AUTOC_RF		0x08000000
+#define IXGBE_AUTOC_PD_TMR	0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE	0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT	0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN	0x007C0000
+#define IXGBE_AUTOC_FECA	0x00040000
+#define IXGBE_AUTOC_FECR	0x00020000
+#define IXGBE_AUTOC_KR_SUPP	0x00010000
+#define IXGBE_AUTOC_AN_RESTART	0x00001000
+#define IXGBE_AUTOC_FLU		0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT	13
+#define IXGBE_AUTOC_LMS_10G_SERIAL	(0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR	(0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M	(0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN	(0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII	(0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK		(0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN	(0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN	(0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN		(0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN		(0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN	(0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE	(0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK	0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT	9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK	0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT	7
+#define IXGBE_AUTOC_10G_XAUI	(0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4	(0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4	(0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX	(0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX	(0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI	(0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX	(0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK	0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK	0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT	16
+#define IXGBE_AUTOC2_10G_KR	(0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI	(0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI	(0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK	0x50000000
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK		0x70000000
+
+#define IXGBE_MACC_FLU		0x00000001
+#define IXGBE_MACC_FSV_10G	0x00030000
+#define IXGBE_MACC_FS		0x00040000
+#define IXGBE_MAC_RX2TX_LPBK	0x00000002
+
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO	0x00000001
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP	0x80000000
+#define IXGBE_LINKS_UP		0x40000000
+#define IXGBE_LINKS_SPEED	0x20000000
+#define IXGBE_LINKS_MODE	0x18000000
+#define IXGBE_LINKS_RX_MODE	0x06000000
+#define IXGBE_LINKS_TX_MODE	0x01800000
+#define IXGBE_LINKS_XGXS_EN	0x00400000
+#define IXGBE_LINKS_SGMII_EN	0x02000000
+#define IXGBE_LINKS_PCS_1G_EN	0x00200000
+#define IXGBE_LINKS_1G_AN_EN	0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE	0x00080000
+#define IXGBE_LINKS_1G_SYNC	0x00040000
+#define IXGBE_LINKS_10G_ALIGN	0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC	0x00017000
+#define IXGBE_LINKS_TL_FAULT		0x00001000
+#define IXGBE_LINKS_SIGNAL		0x00000F00
+
+#define IXGBE_LINKS_SPEED_NON_STD	0x08000000
+#define IXGBE_LINKS_SPEED_82599		0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599	0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599	0x20000000
+#define IXGBE_LINKS_SPEED_100_82599	0x10000000
+#define IXGBE_LINK_UP_TIME		90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME		45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED	0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK		1
+#define IXGBE_PCS1GLSTA_SYNK_OK		0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE	0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX	0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT	0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT	0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS	0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE	0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE	0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP	1
+#define IXGBE_PCS1GLCTL_FORCE_LINK	0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH	0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE	0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART	0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE		0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE		0x0400
+#define IXGBE_ANLP1_ASM_PAUSE		0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK	0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI		0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI	0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG		0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP	0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM		0x0001
+#define IXGBE_GSSR_PHY0_SM		0x0002
+#define IXGBE_GSSR_PHY1_SM		0x0004
+#define IXGBE_GSSR_MAC_CSR_SM		0x0008
+#define IXGBE_GSSR_FLASH_SM		0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM	0x0200
+#define IXGBE_GSSR_SW_MNG_SM		0x0400
+#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
+#define IXGBE_GSSR_I2C_MASK	0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK	0xF
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI	0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK		0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS		0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI		0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO		0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK	0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS	0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN	0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT	4
+#define IXGBE_EEC_REQ		0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT		0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES		0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD		0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP		0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL	0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE	0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE	0x00000400
+#define IXGBE_EEC_SIZE		0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR	0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT		11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT	6
+#define IXGBE_EEPROM_OPCODE_BITS	8
+
+/* FLA Register */
+#define IXGBE_FLA_LOCKED	0x00000040
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH	11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD		0xFAFA
+#define IXGBE_EEPROM_CHECKSUM		0x3F
+#define IXGBE_EEPROM_SUM		0xBABA
+#define IXGBE_PCIE_ANALOG_PTR		0x03
+#define IXGBE_ATLAS0_CONFIG_PTR		0x04
+#define IXGBE_PHY_PTR			0x04
+#define IXGBE_ATLAS1_CONFIG_PTR		0x05
+#define IXGBE_OPTION_ROM_PTR		0x05
+#define IXGBE_PCIE_GENERAL_PTR		0x06
+#define IXGBE_PCIE_CONFIG0_PTR		0x07
+#define IXGBE_PCIE_CONFIG1_PTR		0x08
+#define IXGBE_CORE0_PTR			0x09
+#define IXGBE_CORE1_PTR			0x0A
+#define IXGBE_MAC0_PTR			0x0B
+#define IXGBE_MAC1_PTR			0x0C
+#define IXGBE_CSR0_CONFIG_PTR		0x0D
+#define IXGBE_CSR1_CONFIG_PTR		0x0E
+#define IXGBE_PCIE_ANALOG_PTR_X550	0x02
+#define IXGBE_SHADOW_RAM_SIZE_X550	0x4000
+#define IXGBE_IXGBE_PCIE_GENERAL_SIZE	0x24
+#define IXGBE_PCIE_CONFIG_SIZE		0x08
+#define IXGBE_EEPROM_LAST_WORD		0x41
+#define IXGBE_FW_PTR			0x0F
+#define IXGBE_PBANUM0_PTR		0x15
+#define IXGBE_PBANUM1_PTR		0x16
+#define IXGBE_ALT_MAC_ADDR_PTR		0x37
+#define IXGBE_FREE_SPACE_PTR		0x3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG			0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK	0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT	6
+#define IXGBE_ETS_TYPE_MASK		0x0038
+#define IXGBE_ETS_TYPE_SHIFT		3
+#define IXGBE_ETS_TYPE_EMC		0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK	0x0007
+#define IXGBE_ETS_DATA_LOC_MASK		0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT	10
+#define IXGBE_ETS_DATA_INDEX_MASK	0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT	8
+#define IXGBE_ETS_DATA_HTHRESH_MASK	0x00FF
+
+#define IXGBE_SAN_MAC_ADDR_PTR		0x28
+#define IXGBE_DEVICE_CAPS		0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR	0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS	0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599	0x40
+#define IXGBE_PCIE_MSIX_82598_CAPS	0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598	0x13
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK	0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS		0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0	0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1	0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI	5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI	0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI	0x03  /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI	0x02  /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI	0x08  /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI	0x06  /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI	0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI	0x05  /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI	0x01  /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI	0x20  /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI	0xD8  /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI	0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA	16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE	2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START	1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT	2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE		1 /* Flag for polling for wr complete */
+#define IXGBE_NVM_POLL_READ		0 /* Flag for polling for rd complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS	6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX	128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT	256 /* words rd in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT	256 /* words wr in burst */
+#define IXGBE_EEPROM_CTRL_2		1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT		2
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS	1000 /* EEPROM attempts to gain grant */
+#endif
+
+/* Number of 5 microsecond intervals we wait for EERD read and
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS	100000
+
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS		20000
+
+#define IXGBE_PCIE_CTRL2		0x5   /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE	0x8   /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE	0x2   /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT	0x1   /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET		0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET		0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP		0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS		0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR		0x2
+#define IXGBE_FW_LESM_STATE_1			0x1
+#define IXGBE_FW_LESM_STATE_ENABLED		0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR	0x4
+#define IXGBE_FW_PATCH_VERSION_4		0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR		0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE		0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR		0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET		0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE		0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR		0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET	0x0 /* Alt SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET	0x1 /* Alt SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET	0x4 /* Alt SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET	0x7 /* Alt WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET	0x8 /* Alt WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC	0x0 /* Alt SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN	0x1 /* Alt WWN base exists */
+
+/* FW header offset */
+#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR	0x4
+#define IXGBE_X540_FW_MODULE_MASK			0x7FFF
+/* 4KB multiplier */
+#define IXGBE_X540_FW_MODULE_LENGTH			0x1000
+/* version word 2 (month & day) */
+#define IXGBE_X540_FW_PATCH_VERSION_2		0x5
+/* version word 3 (silicon compatibility & year) */
+#define IXGBE_X540_FW_PATCH_VERSION_3		0x6
+/* version word 4 (major & minor numbers) */
+#define IXGBE_X540_FW_PATCH_VERSION_4		0x7
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1	0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0	0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK	0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS		0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING	0x0020
+#define IXGBE_PCI_LINK_STATUS		0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2	0xC8
+#define IXGBE_PCI_LINK_WIDTH		0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1		0x10
+#define IXGBE_PCI_LINK_WIDTH_2		0x20
+#define IXGBE_PCI_LINK_WIDTH_4		0x40
+#define IXGBE_PCI_LINK_WIDTH_8		0x80
+#define IXGBE_PCI_LINK_SPEED		0xF
+#define IXGBE_PCI_LINK_SPEED_2500	0x1
+#define IXGBE_PCI_LINK_SPEED_5000	0x2
+#define IXGBE_PCI_LINK_SPEED_8000	0x3
+#define IXGBE_PCI_HEADER_TYPE_REGISTER	0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC	0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms	0x0005
+
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def	0x0
+#define IXGBE_PCIDEVCTRL2_50_100us	0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms		0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms	0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms	0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms	0x9
+#define IXGBE_PCIDEVCTRL2_1_2s		0xa
+#define IXGBE_PCIDEVCTRL2_4_8s		0xd
+#define IXGBE_PCIDEVCTRL2_17_34s	0xe
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT	800
+
+/* Check whether address is multicast. This check is little-endian specific. */
+#define IXGBE_IS_MULTICAST(Address) \
+		(bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+		((((u8 *)(Address))[0] == ((u8)0xff)) && \
+		(((u8 *)(Address))[1] == ((u8)0xff)))
+
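+/*
+ * Usage sketch (illustrative only): both helpers take a pointer to the
+ * first byte of a MAC address, e.g.
+ *
+ *	u8 mac[IXGBE_ETH_LENGTH_OF_ADDRESS] = { 0x01, 0x00, 0x5E, 0, 0, 1 };
+ *
+ * IXGBE_IS_MULTICAST(mac) is true here (I/G bit of the first byte is set)
+ * while IXGBE_IS_BROADCAST(mac) is false.
+ */
+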
+/* RAH */
+#define IXGBE_RAH_VIND_MASK	0x003C0000
+#define IXGBE_RAH_VIND_SHIFT	18
+#define IXGBE_RAH_AV		0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL	0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS		0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK	0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT	1
+#define IXGBE_RFCTL_RSC_DIS		0x00000020
+#define IXGBE_RFCTL_NFSW_DIS		0x00000040
+#define IXGBE_RFCTL_NFSR_DIS		0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK	0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT	8
+#define IXGBE_RFCTL_NFS_VER_2		0
+#define IXGBE_RFCTL_NFS_VER_3		1
+#define IXGBE_RFCTL_NFS_VER_4		2
+#define IXGBE_RFCTL_IPV6_DIS		0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS	0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS		0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS		0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS	0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT	16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE		0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE	0x00000004  /* Allow jumbo frames */
+/* This allows for 16K packets + 4K for VLAN */
+#define IXGBE_MAX_FRAME_SZ		0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE	0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE	0x2 /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN		0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS		0x00000002 /* Desc Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE		0x02000000 /* Ena specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH		0x04000000 /* Rx Desc wr-bk flushing */
+#define IXGBE_RXDCTL_RLPMLMASK		0x00003FFF /* X540 supported only */
+#define IXGBE_RXDCTL_RLPML_EN		0x00008000
+#define IXGBE_RXDCTL_VME		0x40000000 /* VLAN mode enable */
+
+#define IXGBE_TSAUXC_EN_CLK		0x00000004
+#define IXGBE_TSAUXC_SYNCLK		0x00000008
+#define IXGBE_TSAUXC_SDP0_INT		0x00000040
+#define IXGBE_TSAUXC_EN_TT0		0x00000001
+#define IXGBE_TSAUXC_EN_TT1		0x00000002
+#define IXGBE_TSAUXC_ST0		0x00000010
+#define IXGBE_TSAUXC_DISABLE_SYSTIME	0x80000000
+
+#define IXGBE_TSSDP_TS_SDP0_SEL_MASK	0x000000C0
+#define IXGBE_TSSDP_TS_SDP0_CLK0	0x00000080
+#define IXGBE_TSSDP_TS_SDP0_EN		0x00000100
+
+#define IXGBE_TSYNCTXCTL_VALID		0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED	0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID		0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK	0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2	0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1	0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2	0x04
+#define IXGBE_TSYNCRXCTL_TYPE_ALL	0x08
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2	0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED	0x00000010 /* Rx Timestamping enabled */
+#define IXGBE_TSYNCRXCTL_TSIP_UT_EN	0x00800000 /* Rx Timestamp in Packet */
+#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK	0xFF000000 /* Rx Timestamp UP Mask */
+
+#define IXGBE_TSIM_SYS_WRAP		0x00000001
+#define IXGBE_TSIM_TXTS			0x00000002
+#define IXGBE_TSIM_TADJ			0x00000080
+
+#define IXGBE_TSICR_SYS_WRAP		IXGBE_TSIM_SYS_WRAP
+#define IXGBE_TSICR_TXTS		IXGBE_TSIM_TXTS
+#define IXGBE_TSICR_TADJ		IXGBE_TSIM_TADJ
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK	0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG	0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG	0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG	0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG	0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG	0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK	0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG	0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG	0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG	0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG	0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG	0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG	0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG	0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG	0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG	0x0D00
+
+#define IXGBE_FCTRL_SBP		0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE		0x00000100 /* Multicast Promiscuous Ena */
+#define IXGBE_FCTRL_UPE		0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM		0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF	0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF		0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE	0x00004000
+#define IXGBE_FCTRL_RFCE	0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF	0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF		0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE	0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE	0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF4 /* Rx Priority FC bitmap mask */
+#define IXGBE_MFLCN_RPFCE_SHIFT	4 /* Rx Priority FC bitmap shift */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN	0x00000001  /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK	0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN	0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN	0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN	0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN	0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN	0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN	0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN	0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN	0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN	0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK	0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP	0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4	0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX	0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6	0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP	0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_MULTIPLE_RSS		0x00002000
+#define IXGBE_MRQC_L3L4TXSWEN		0x00008000
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE	0x00000001
+#define IXGBE_QDE_HIDE_VLAN	0x00000002
+#define IXGBE_QDE_IDX_MASK	0x00007F00
+#define IXGBE_QDE_IDX_SHIFT	8
+#define IXGBE_QDE_WRITE		0x00010000
+#define IXGBE_QDE_READ		0x00020000
+
+#define IXGBE_TXD_POPTS_IXSM	0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM	0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP	0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC	0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS	0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT	0x20000000 /* Desc extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE	0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD	0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP		0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH	0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED	0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK	0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA	0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA	0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB	0x0 /* 64 queues 1 packet buffer */
+#define IXGBE_MTQC_32VF		0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF		0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ	0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ	0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD	0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP	0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM	0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP	0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK	0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT	0x00000004
+#define IXGBE_RXD_STAT_UDPCS	0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS	0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS	0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF	0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV	0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_OUTERIPCS	0x100 /* Cloud IP xsum calculated */
+#define IXGBE_RXD_STAT_VEXT	0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV	0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT	0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT	0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TSIP	0x08000 /* Time Stamp in packet buffer */
+#define IXGBE_RXD_STAT_TS	0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP	0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB	0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK	0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE	0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE	0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE	0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE	0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE	0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE	0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE	0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK		0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT		20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_OUTERIPER	0x04000000 /* CRC IP Header error */
+#define IXGBE_RXDADV_ERR_RXE		0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE		0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR		0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN	0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP	0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL	0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO	0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE	0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE	0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE	0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE	0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE	0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE	0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE	0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK	0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK	0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT	13
+#define IXGBE_RXD_CFI_MASK	0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT	12
+
+#define IXGBE_RXDADV_STAT_DD		IXGBE_RXD_STAT_DD  /* Done */
+#define IXGBE_RXDADV_STAT_EOP		IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM		IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP		IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK		0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS	0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT	0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH	0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP	0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP	0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP	0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS		0x00010000 /* IEEE1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_TSIP		0x00008000 /* Time Stamp in packet buffer */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR	0x00000010
+#define IXGBE_PSRTYPE_UDPHDR	0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR	0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR	0x00000200
+#define IXGBE_PSRTYPE_L2HDR	0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT	10 /* so many KBs */
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2 /* 64byte resolution (>> 6)
+					   * + at bit 8 offset (<< 8)
+					   *  = (<< 2)
+					   */
+#define IXGBE_SRRCTL_RDMTS_SHIFT	22
+#define IXGBE_SRRCTL_RDMTS_MASK		0x01C00000
+#define IXGBE_SRRCTL_DROP_EN		0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK	0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK	0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY	0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT	0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK	0x0E000000
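+
+/*
+ * Worked example (illustrative only) of how the fields above are typically
+ * encoded when programming SRRCTL for a queue with a 2 KB packet buffer and
+ * a 128 byte header buffer:
+ *
+ *	srrctl  = (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+ *		  IXGBE_SRRCTL_BSIZEPKT_MASK;		2048 >> 10 = 0x2
+ *	srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ *		  IXGBE_SRRCTL_BSIZEHDR_MASK;		128 << 2 = 0x200
+ *	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ */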
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP	0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK	0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK	0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK	0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX	0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK	0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK	0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT	17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT	5
+#define IXGBE_RXDADV_SPLITHEADER_EN	0x00001000
+#define IXGBE_RXDADV_SPH		0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE	0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP	0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4	0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP	0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX	0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6	0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP	0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP	0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE	0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4	0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX	0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6	0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX	0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP	0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP	0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP	0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS	0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_VXLAN	0x00000800 /* VXLAN hdr present */
+#define IXGBE_RXDADV_PKTTYPE_TUNNEL	0x00010000 /* Tunnel type */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP	0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH	0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC	0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF	0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK	0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT	4 /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP		0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH	0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR	0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK	0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG	0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+				IXGBE_RXD_ERR_CE | \
+				IXGBE_RXD_ERR_LE | \
+				IXGBE_RXD_ERR_PE | \
+				IXGBE_RXD_ERR_OSE | \
+				IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+				IXGBE_RXDADV_ERR_CE | \
+				IXGBE_RXDADV_ERR_LE | \
+				IXGBE_RXDADV_ERR_PE | \
+				IXGBE_RXDADV_ERR_OSE | \
+				IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599	IXGBE_RXDADV_ERR_RXE
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE	0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE	8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE	8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY		1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK	0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK	0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT	0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT	IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number)	(vf_number >> 4)
+#define IXGBE_MBVFICR(_i)		(0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i)			(((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i)		 (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P)	(0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P)	(0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P)	(0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P)	(0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P)	(0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P)	(0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P)	(0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P)	(0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P)	(0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P)	(0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P)	(0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P)	(0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P)	(((P) < 24) ? (0x00820 + ((P) * 4)) : \
+				 (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P)	(0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P)	(0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P)	(0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P)	(0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P)	((P < 64) ? (0x01000 + (0x40 * (P))) \
+				 : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P)	((P < 64) ? (0x01004 + (0x40 * (P))) \
+				 : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P)	((P < 64) ? (0x01008 + (0x40 * (P))) \
+				 : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P)		((P < 64) ? (0x01010 + (0x40 * (P))) \
+				 : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P)		((P < 64) ? (0x01018 + (0x40 * (P))) \
+				 : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P)	((P < 64) ? (0x01028 + (0x40 * (P))) \
+				 : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P)	((P < 64) ? (0x01014 + (0x40 * (P))) \
+				 : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P)	(0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P)	(0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P)	(0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTTDLEN(P)	(0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P)		(0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P)		(0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)	(0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P)	(((P) < 64) ? (0x0100C + (0x40 * (P))) \
+				 : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P)	(0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x)	(0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x)	(0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x)	(0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x)	(0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x)	(0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x)	(0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x)	(0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
+		(IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
+
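+/*
+ * Example (illustrative only): with 2 Tx queues per pool, VF 3 / queue 1
+ * maps to absolute queue index 2 * 3 + 1 = 7, so
+ * IXGBE_PVFTDTn(2, 3, 1) == IXGBE_PVFTDT(7) == 0x06018 + (0x40 * 7).
+ */
+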
+/* Little Endian defines */
+#ifndef __le16
+#define __le16  u16
+#endif
+#ifndef __le32
+#define __le32  u32
+#endif
+#ifndef __le64
+#define __le64  u64
+
+#endif
+#ifndef __be16
+/* Big Endian defines */
+#define __be16  u16
+#define __be32  u32
+#define __be64  u64
+
+#endif
+enum ixgbe_fdir_pballoc_type {
+	IXGBE_FDIR_PBALLOC_NONE = 0,
+	IXGBE_FDIR_PBALLOC_64K  = 1,
+	IXGBE_FDIR_PBALLOC_128K = 2,
+	IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K		0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K		0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K		0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE		0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH		0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS		0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS	0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT		8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT		16
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT		21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN	0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD		0x0002 /* bit 23:21, 010b */
+#define IXGBE_FDIRCTRL_SEARCHLIM		0x00800000
+#define IXGBE_FDIRCTRL_FILTERMODE_MASK		0x00E00000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT		24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK		0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT	28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT		16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT		16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT		16
+#define IXGBE_FDIRM_VLANID			0x00000001
+#define IXGBE_FDIRM_VLANP			0x00000002
+#define IXGBE_FDIRM_POOL			0x00000004
+#define IXGBE_FDIRM_L4P				0x00000008
+#define IXGBE_FDIRM_FLEX			0x00000010
+#define IXGBE_FDIRM_DIPv6			0x00000020
+#define IXGBE_FDIRM_L3P				0x00000040
+
+#define IXGBE_FDIRIP6M_INNER_MAC	0x03F0 /* bit 9:4 */
+#define IXGBE_FDIRIP6M_TUNNEL_TYPE	0x0800 /* bit 11 */
+#define IXGBE_FDIRIP6M_TNI_VNI		0xF000 /* bit 15:12 */
+#define IXGBE_FDIRIP6M_TNI_VNI_24	0x1000 /* bit 12 */
+#define IXGBE_FDIRIP6M_ALWAYS_MASK	0x040F /* bit 10, 3:0 */
+
+#define IXGBE_FDIRFREE_FREE_MASK		0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT		0
+#define IXGBE_FDIRFREE_COLL_MASK		0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT		16
+#define IXGBE_FDIRLEN_MAXLEN_MASK		0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT		0
+#define IXGBE_FDIRLEN_MAXHASH_MASK		0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT		16
+#define IXGBE_FDIRUSTAT_ADD_MASK		0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT		0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK		0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT		16
+#define IXGBE_FDIRFSTAT_FADD_MASK		0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT		0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK		0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT		8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT	16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT		16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT	15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT	16
+
+#define IXGBE_FDIRCMD_CMD_MASK			0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW		0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW		0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT	0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID		0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE		0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH		0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP		0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP		0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP		0x00000060
+#define IXGBE_FDIRCMD_IPV6			0x00000080
+#define IXGBE_FDIRCMD_CLEARHT			0x00000100
+#define IXGBE_FDIRCMD_DROP			0x00000200
+#define IXGBE_FDIRCMD_INT			0x00000400
+#define IXGBE_FDIRCMD_LAST			0x00000800
+#define IXGBE_FDIRCMD_COLLISION			0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN			0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT		5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT		16
+#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT	23
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT		24
+#define IXGBE_FDIR_INIT_DONE_POLL		10
+#define IXGBE_FDIRCMD_CMD_POLL			10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER		0x00800000
+#define IXGBE_FDIR_DROP_QUEUE			127
+
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH	1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH	448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT	500 /* Process HI command limit */
+#define IXGBE_HI_FLASH_ERASE_TIMEOUT	1000 /* Process Erase command limit */
+#define IXGBE_HI_FLASH_UPDATE_TIMEOUT	5000 /* Process Update command limit */
+#define IXGBE_HI_FLASH_APPLY_TIMEOUT	0 /* Process Apply command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN			0x4
+#define FW_CEM_CMD_DRIVER_INFO		0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN	0x5
+#define FW_CEM_CMD_RESERVED		0x0
+#define FW_CEM_UNUSED_VER		0x0
+#define FW_CEM_MAX_RETRIES		3
+#define FW_CEM_RESP_STATUS_SUCCESS	0x1
+#define FW_READ_SHADOW_RAM_CMD		0x31
+#define FW_READ_SHADOW_RAM_LEN		0x6
+#define FW_WRITE_SHADOW_RAM_CMD		0x33
+#define FW_WRITE_SHADOW_RAM_LEN		0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD		0x36
+#define FW_SHADOW_RAM_DUMP_LEN		0
+#define FW_DEFAULT_CHECKSUM		0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET		3
+#define FW_MAX_READ_BUFFER_SIZE		1024
+#define FW_DISABLE_RXEN_CMD		0xDE
+#define FW_DISABLE_RXEN_LEN		0x1
+/* Host Interface Command Structures */
+
+struct ixgbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_req {
+	u8 cmd;
+	u8 buf_lenh;
+	u8 buf_lenl;
+	u8 checksum;
+};
+
+struct ixgbe_hic_hdr2_rsp {
+	u8 cmd;
+	u8 buf_lenl;
+	u8 buf_lenh_status;	/* 7-5: high bits of buf_len, 4-0: status */
+	u8 checksum;
+};
+
+union ixgbe_hic_hdr2 {
+	struct ixgbe_hic_hdr2_req req;
+	struct ixgbe_hic_hdr2_rsp rsp;
+};
+
+struct ixgbe_hic_drv_info {
+	struct ixgbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* These need to be dword aligned */
+struct ixgbe_hic_read_shadow_ram {
+	union ixgbe_hic_hdr2 hdr;
+	u32 address;
+	u16 length;
+	u16 pad2;
+	u16 data;
+	u16 pad3;
+};
+
+struct ixgbe_hic_write_shadow_ram {
+	union ixgbe_hic_hdr2 hdr;
+	u32 address;
+	u16 length;
+	u16 pad2;
+	u16 data;
+	u16 pad3;
+};
+
+struct ixgbe_hic_disable_rxen {
+	struct ixgbe_hic_hdr hdr;
+	u8  port_number;
+	u8  pad2;
+	u16 pad3;
+};
+
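+/*
+ * Illustrative sketch only (not part of the base driver code): a driver
+ * info command buffer is typically filled as below before being passed to
+ * the host interface (for example by ixgbe_set_fw_drv_ver_generic() in
+ * ixgbe_common.c).  "maj", "min", "build" and "sub" are placeholders for
+ * the driver version numbers.
+ *
+ *	struct ixgbe_hic_drv_info fw_cmd;
+ *
+ *	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ *	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ *	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ *	fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ *	fw_cmd.port_num = (u8)hw->bus.func;
+ *	fw_cmd.ver_maj = maj;
+ *	fw_cmd.ver_min = min;
+ *	fw_cmd.ver_build = build;
+ *	fw_cmd.ver_sub = sub;
+ *	fw_cmd.pad = 0;
+ *	fw_cmd.pad2 = 0;
+ */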
+
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+	u64 buffer_addr; /* Address of the descriptor's data buffer */
+	union {
+		__le32 data;
+		struct {
+			__le16 length; /* Data buffer length */
+			u8 cso; /* Checksum offset */
+			u8 cmd; /* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status; /* Descriptor status */
+			u8 css; /* Checksum start */
+			__le16 vlan;
+		} fields;
+	} upper;
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+	struct {
+		__le64 buffer_addr; /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd; /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+	__le64 buffer_addr; /* Address of the descriptor's data buffer */
+	__le16 length; /* Length of data DMAed into data buffer */
+	__le16 csum; /* Packet checksum */
+	u8 status;   /* Descriptor status */
+	u8 errors;   /* Descriptor Errors */
+	__le16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS, Pkt type */
+					__le16 hdr_info; /* Splithdr, hdrlen */
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				struct {
+					__le16 ip_id; /* IP id */
+					__le16 csum; /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* ext status/error */
+			__le16 length; /* Packet length */
+			__le16 vlan; /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
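+/*
+ * Illustrative sketch only: on receive, software polls the writeback view
+ * of the descriptor together with the IXGBE_RXDADV_STAT_* / _ERR_* bits
+ * defined above.  "rx_ring" and "next_to_check" are placeholders, and
+ * rte_le_to_cpu_32()/rte_le_to_cpu_16() are DPDK's byte-order helpers from
+ * rte_byteorder.h:
+ *
+ *	union ixgbe_adv_rx_desc *rxd = &rx_ring[next_to_check];
+ *	u32 staterr = rte_le_to_cpu_32(rxd->wb.upper.status_error);
+ *
+ *	if (staterr & IXGBE_RXDADV_STAT_DD) {
+ *		u16 pkt_len = rte_le_to_cpu_16(rxd->wb.upper.length);
+ *		int last_seg = !!(staterr & IXGBE_RXDADV_STAT_EOP);
+ *		int bad = !!(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK);
+ *		...
+ *	}
+ */
+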
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK	0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC	0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP		0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK	0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK		0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT		0x00200000 /* Adv Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA		0x00300000 /* Adv Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP		IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS		IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS		IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI	0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT		IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
+#define IXGBE_ADVTXD_DCMD_VLE		IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE		0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD		IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC	0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV		0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT		4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC			0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT	8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM		(IXGBE_TXD_POPTS_IXSM << \
+					 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM		(IXGBE_TXD_POPTS_TXSM << \
+					 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST	0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL	0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST	0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL	0x00001800
+#define IXGBE_ADVTXD_POPTS_RSV		0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT	9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT		16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4		0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6		0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP	0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP	0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP	0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ	0x00002000 /* req Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC	0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE		0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK	(0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF		((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC	((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE		((1 << 4) << 10) /* Orientation End */
+#define IXGBE_ADVTXD_FCOEF_ORIS		((1 << 5) << 10) /* Orientation Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N	(0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T	(0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI	(0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A	(0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT		16  /* Adv ctxt MSS shift */
+
+#define IXGBE_ADVTXD_OUTER_IPLEN	16 /* Adv ctxt OUTERIPLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_LEN	24 /* Adv ctxt TUNNELLEN shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT	16 /* Adv Tx Desc Tunnel Type shift */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT	17 /* Adv Tx Desc OUTERIPCS Shift */
+#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE	1  /* Adv Tx Desc Tunnel Type NVGRE */
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN	0
+#define IXGBE_LINK_SPEED_100_FULL	0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL	0x0020
+#define IXGBE_LINK_SPEED_2_5GB_FULL	0x0400
+#define IXGBE_LINK_SPEED_5GB_FULL	0x0800
+#define IXGBE_LINK_SPEED_10GB_FULL	0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG	(IXGBE_LINK_SPEED_1GB_FULL | \
+					 IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG	(IXGBE_LINK_SPEED_100_FULL | \
+					 IXGBE_LINK_SPEED_1GB_FULL | \
+					 IXGBE_LINK_SPEED_10GB_FULL)
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN		0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T		0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T		0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX		0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU	0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR		0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM	0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR		0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4	0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4	0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX	0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX	0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR		0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI	0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA	0x2000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX	0x4000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT)		((BT + (8 * 1024 - 1)) / (8 * 1024))
+#define IXGBE_B2BT(BT)		(BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D	672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC	5556 /* Delay Copper */
+#define IXGBE_CABLE_DO	5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC	25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC	8192  /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC	(2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540	(IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D	12800
+#define IXGBE_MAC_D	4096
+#define IXGBE_XAUI_D	(2 * 1024)
+
+#define IXGBE_ID	(IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD	6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY	10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+			((36 * \
+			  (IXGBE_B2BT(_max_frame_link) + \
+			   IXGBE_PFC_D + \
+			   (2 * IXGBE_CABLE_DC) + \
+			   (2 * IXGBE_ID_X540) + \
+			   IXGBE_HD) / 25 + 1) + \
+			 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+			((36 * \
+			  (IXGBE_B2BT(_max_frame_link) + \
+			   IXGBE_PFC_D + \
+			   (2 * IXGBE_CABLE_DC) + \
+			   (2 * IXGBE_ID) + \
+			   IXGBE_HD) / 25 + 1) + \
+			 2 * IXGBE_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+			(2 * IXGBE_B2BT(_max_frame_tc) + \
+			(36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+			(2 * IXGBE_LOW_DV_X540(_max_frame_tc))
+
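+/*
+ * Usage sketch (illustrative only): the macros above yield delays in bit
+ * times, which are converted to kilobytes when deriving the PFC high/low
+ * water marks, roughly:
+ *
+ *	u32 dv_id = IXGBE_DV(max_frame_link, max_frame_tc);
+ *	u32 lo_id = IXGBE_LOW_DV(max_frame_tc);
+ *	u32 high_water_kb = IXGBE_BT2KB(dv_id);
+ *	u32 low_water_kb = IXGBE_BT2KB(lo_id);
+ *
+ * X540 devices use IXGBE_DV_X540()/IXGBE_LOW_DV_X540() instead.
+ */
+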
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY	0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY	0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK		0x7fff
+#define IXGBE_ATR_L4TYPE_MASK		0x3
+#define IXGBE_ATR_L4TYPE_UDP		0x1
+#define IXGBE_ATR_L4TYPE_TCP		0x2
+#define IXGBE_ATR_L4TYPE_SCTP		0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK	0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK	0x10
+enum ixgbe_atr_flow_type {
+	IXGBE_ATR_FLOW_TYPE_IPV4	= 0x0,
+	IXGBE_ATR_FLOW_TYPE_UDPV4	= 0x1,
+	IXGBE_ATR_FLOW_TYPE_TCPV4	= 0x2,
+	IXGBE_ATR_FLOW_TYPE_SCTPV4	= 0x3,
+	IXGBE_ATR_FLOW_TYPE_IPV6	= 0x4,
+	IXGBE_ATR_FLOW_TYPE_UDPV6	= 0x5,
+	IXGBE_ATR_FLOW_TYPE_TCPV6	= 0x6,
+	IXGBE_ATR_FLOW_TYPE_SCTPV6	= 0x7,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4	= 0x10,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4	= 0x11,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4	= 0x12,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4	= 0x13,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6	= 0x14,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6	= 0x15,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6	= 0x16,
+	IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6	= 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool	- 1 byte
+	 * flow_type	- 1 byte
+	 * vlan_id	- 2 bytes
+	 * dst_ip	- 16 bytes
+	 * src_ip	- 16 bytes
+	 * inner_mac	- 6 bytes
+	 * tunnel_type	- 2 bytes
+	 * tni_vni	- 4 bytes
+	 * src_port	- 2 bytes
+	 * dst_port	- 2 bytes
+	 * flex_bytes	- 2 bytes
+	 * bkt_hash	- 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		u8 inner_mac[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	__be32 dword_stream[14];
+};
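+
+/*
+ * Example (illustrative sketch): populating the ATR input for an IPv4/TCP
+ * flow before hashing it or programming a flow director filter. Multi-byte
+ * fields are stored big-endian, matching the __be* types above; the
+ * IXGBE_CPU_TO_BE32()/IXGBE_CPU_TO_BE16() conversions are assumed to be
+ * provided by the platform osdep layer.
+ *
+ *	union ixgbe_atr_input in;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	in.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ *	in.formatted.src_ip[0] = IXGBE_CPU_TO_BE32(0xC0A80001);	(192.168.0.1)
+ *	in.formatted.dst_ip[0] = IXGBE_CPU_TO_BE32(0xC0A80002);	(192.168.0.2)
+ *	in.formatted.src_port  = IXGBE_CPU_TO_BE16(1024);
+ *	in.formatted.dst_port  = IXGBE_CPU_TO_BE16(80);
+ */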
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+	ixgbe_fcoe_bootstatus_disabled = 0,
+	ixgbe_fcoe_bootstatus_enabled = 1,
+	ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+enum ixgbe_eeprom_type {
+	ixgbe_eeprom_uninitialized = 0,
+	ixgbe_eeprom_spi,
+	ixgbe_flash,
+	ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+	ixgbe_mac_unknown = 0,
+	ixgbe_mac_82598EB,
+	ixgbe_mac_82599EB,
+	ixgbe_mac_82599_vf,
+	ixgbe_mac_X540,
+	ixgbe_mac_X540_vf,
+	/*
+	 * X550EM MAC type decoder:
+	 * ixgbe_mac_X550EM_x: "x" = Xeon
+	 * ixgbe_mac_X550EM_a: "a" = Atom
+	 */
+	ixgbe_mac_X550,
+	ixgbe_mac_X550EM_x,
+	ixgbe_mac_X550_vf,
+	ixgbe_mac_X550EM_x_vf,
+	ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+	ixgbe_phy_unknown = 0,
+	ixgbe_phy_none,
+	ixgbe_phy_tn,
+	ixgbe_phy_aq,
+	ixgbe_phy_x550em_kr,
+	ixgbe_phy_x550em_kx4,
+	ixgbe_phy_x550em_ext_t,
+	ixgbe_phy_cu_unknown,
+	ixgbe_phy_qt,
+	ixgbe_phy_xaui,
+	ixgbe_phy_nl,
+	ixgbe_phy_sfp_passive_tyco,
+	ixgbe_phy_sfp_passive_unknown,
+	ixgbe_phy_sfp_active_unknown,
+	ixgbe_phy_sfp_avago,
+	ixgbe_phy_sfp_ftl,
+	ixgbe_phy_sfp_ftl_active,
+	ixgbe_phy_sfp_unknown,
+	ixgbe_phy_sfp_intel,
+	ixgbe_phy_qsfp_passive_unknown,
+	ixgbe_phy_qsfp_active_unknown,
+	ixgbe_phy_qsfp_intel,
+	ixgbe_phy_qsfp_unknown,
+	ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
+	ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID	Module Type
+ * =============
+ * 0	SFP_DA_CU
+ * 1	SFP_SR
+ * 2	SFP_LR
+ * 3	SFP_DA_CU_CORE0 - 82599-specific
+ * 4	SFP_DA_CU_CORE1 - 82599-specific
+ * 5	SFP_SR/LR_CORE0 - 82599-specific
+ * 6	SFP_SR/LR_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+	ixgbe_sfp_type_da_cu = 0,
+	ixgbe_sfp_type_sr = 1,
+	ixgbe_sfp_type_lr = 2,
+	ixgbe_sfp_type_da_cu_core0 = 3,
+	ixgbe_sfp_type_da_cu_core1 = 4,
+	ixgbe_sfp_type_srlr_core0 = 5,
+	ixgbe_sfp_type_srlr_core1 = 6,
+	ixgbe_sfp_type_da_act_lmt_core0 = 7,
+	ixgbe_sfp_type_da_act_lmt_core1 = 8,
+	ixgbe_sfp_type_1g_cu_core0 = 9,
+	ixgbe_sfp_type_1g_cu_core1 = 10,
+	ixgbe_sfp_type_1g_sx_core0 = 11,
+	ixgbe_sfp_type_1g_sx_core1 = 12,
+	ixgbe_sfp_type_1g_lx_core0 = 13,
+	ixgbe_sfp_type_1g_lx_core1 = 14,
+	ixgbe_sfp_type_not_present = 0xFFFE,
+	ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum ixgbe_media_type {
+	ixgbe_media_type_unknown = 0,
+	ixgbe_media_type_fiber,
+	ixgbe_media_type_fiber_qsfp,
+	ixgbe_media_type_fiber_lco,
+	ixgbe_media_type_copper,
+	ixgbe_media_type_backplane,
+	ixgbe_media_type_cx4,
+	ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+	ixgbe_fc_none = 0,
+	ixgbe_fc_rx_pause,
+	ixgbe_fc_tx_pause,
+	ixgbe_fc_full,
+	ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES	3
+enum ixgbe_smart_speed {
+	ixgbe_smart_speed_auto = 0,
+	ixgbe_smart_speed_on,
+	ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+	ixgbe_bus_type_unknown = 0,
+	ixgbe_bus_type_pci,
+	ixgbe_bus_type_pcix,
+	ixgbe_bus_type_pci_express,
+	ixgbe_bus_type_internal,
+	ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+	ixgbe_bus_speed_unknown	= 0,
+	ixgbe_bus_speed_33	= 33,
+	ixgbe_bus_speed_66	= 66,
+	ixgbe_bus_speed_100	= 100,
+	ixgbe_bus_speed_120	= 120,
+	ixgbe_bus_speed_133	= 133,
+	ixgbe_bus_speed_2500	= 2500,
+	ixgbe_bus_speed_5000	= 5000,
+	ixgbe_bus_speed_8000	= 8000,
+	ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+	ixgbe_bus_width_unknown	= 0,
+	ixgbe_bus_width_pcie_x1	= 1,
+	ixgbe_bus_width_pcie_x2	= 2,
+	ixgbe_bus_width_pcie_x4	= 4,
+	ixgbe_bus_width_pcie_x8	= 8,
+	ixgbe_bus_width_32	= 32,
+	ixgbe_bus_width_64	= 64,
+	ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+	enum ixgbe_bus_speed speed;
+	enum ixgbe_bus_width width;
+	enum ixgbe_bus_type type;
+
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+	u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
+	u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+	u64 crcerrs;
+	u64 illerrc;
+	u64 errbc;
+	u64 mspdc;
+	u64 mpctotal;
+	u64 mpc[8];
+	u64 mlfc;
+	u64 mrfc;
+	u64 rlec;
+	u64 lxontxc;
+	u64 lxonrxc;
+	u64 lxofftxc;
+	u64 lxoffrxc;
+	u64 pxontxc[8];
+	u64 pxonrxc[8];
+	u64 pxofftxc[8];
+	u64 pxoffrxc[8];
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc[8];
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mngprc;
+	u64 mngpdc;
+	u64 mngptc;
+	u64 tor;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 xec;
+	u64 qprc[16];
+	u64 qptc[16];
+	u64 qbrc[16];
+	u64 qbtc[16];
+	u64 qprdc[16];
+	u64 pxon2offc[8];
+	u64 fdirustat_add;
+	u64 fdirustat_remove;
+	u64 fdirfstat_fadd;
+	u64 fdirfstat_fremove;
+	u64 fdirmatch;
+	u64 fdirmiss;
+	u64 fccrc;
+	u64 fclast;
+	u64 fcoerpdc;
+	u64 fcoeprc;
+	u64 fcoeptc;
+	u64 fcoedwrc;
+	u64 fcoedwtc;
+	u64 fcoe_noddp;
+	u64 fcoe_noddp_ext_buff;
+	u64 ldpcec;
+	u64 pcrc8ec;
+	u64 b2ospc;
+	u64 b2ogprc;
+	u64 o2bgptc;
+	u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+
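+/*
+ * Example (illustrative sketch): a minimal iterator matching the
+ * ixgbe_mc_addr_itr signature, walking a flat array of 6-byte addresses.
+ * update_mc_addr_list() implementations call it once per address; the
+ * helper name is hypothetical.
+ *
+ *	static u8 *example_mc_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ *				  u32 *vmdq)
+ *	{
+ *		u8 *addr = *mc_addr_ptr;
+ *
+ *		*vmdq = 0;			(default pool)
+ *		*mc_addr_ptr = addr + 6;	(advance to the next entry)
+ *		return addr;
+ *	}
+ */
+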
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+	s32 (*init_params)(struct ixgbe_hw *);
+	s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+	s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+	s32 (*write)(struct ixgbe_hw *, u16, u16);
+	s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+	s32 (*update_checksum)(struct ixgbe_hw *);
+	s32 (*calc_checksum)(struct ixgbe_hw *);
+};
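+
+/*
+ * Usage sketch (illustrative): callers normally reach the EEPROM (and the
+ * MAC/PHY tables below) through these ops structures rather than calling
+ * chip-specific helpers directly, checking for a populated pointer first:
+ *
+ *	u16 word;
+ *
+ *	if (hw->eeprom.ops.read != NULL)
+ *		hw->eeprom.ops.read(hw, 0x10, &word);
+ */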
+
+struct ixgbe_mac_operations {
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+	void (*enable_relaxed_ordering)(struct ixgbe_hw *);
+	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+	s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);
+	void (*set_lan_id)(struct ixgbe_hw *);
+	s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+	s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+	s32 (*setup_sfp)(struct ixgbe_hw *);
+	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+	s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
+	s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
+	s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+	void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+	s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+	s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+
+	/* Link */
+	void (*disable_tx_laser)(struct ixgbe_hw *);
+	void (*enable_tx_laser)(struct ixgbe_hw *);
+	void (*flap_tx_laser)(struct ixgbe_hw *);
+	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+				     bool *);
+
+	/* Packet Buffer manipulation */
+	void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+	/* LED */
+	s32 (*led_on)(struct ixgbe_hw *, u32);
+	s32 (*led_off)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+	s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+	s32 (*clear_rar)(struct ixgbe_hw *, u32);
+	s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
+	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				   ixgbe_mc_addr_itr);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				   ixgbe_mc_addr_itr, bool clear);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+	s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
+	s32 (*init_uta_tables)(struct ixgbe_hw *);
+	void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+	void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+
+	/* Flow Control */
+	s32 (*fc_enable)(struct ixgbe_hw *);
+
+	/* Manageability interface */
+	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+	s32 (*dmac_config)(struct ixgbe_hw *hw);
+	s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+	s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+	void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
+	s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
+	void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
+	void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+					   unsigned int);
+	void (*disable_rx)(struct ixgbe_hw *hw);
+	void (*enable_rx)(struct ixgbe_hw *hw);
+	s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+	s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+	void (*disable_mdd)(struct ixgbe_hw *hw);
+	void (*enable_mdd)(struct ixgbe_hw *hw);
+	void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
+	void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+};
+
+struct ixgbe_phy_operations {
+	s32 (*identify)(struct ixgbe_hw *);
+	s32 (*identify_sfp)(struct ixgbe_hw *);
+	s32 (*init)(struct ixgbe_hw *);
+	s32 (*reset)(struct ixgbe_hw *);
+	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+	s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+	s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+	s32 (*setup_link)(struct ixgbe_hw *);
+	s32 (*setup_internal_link)(struct ixgbe_hw *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+	s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+	s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
+	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+	void (*i2c_bus_clear)(struct ixgbe_hw *);
+	s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+	s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+	s32 (*check_overtemp)(struct ixgbe_hw *);
+	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+};
+
+struct ixgbe_eeprom_info {
+	struct ixgbe_eeprom_operations ops;
+	enum ixgbe_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	enum ixgbe_mac_type type;
+	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+#define IXGBE_MAX_MTA			128
+	u32 mta_shadow[IXGBE_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 orig_autoc;
+	u8  san_mac_rar_index;
+	bool get_link_status;
+	u32 orig_autoc2;
+	u16 max_msix_vectors;
+	bool arc_subsystem_valid;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 flags;
+	struct ixgbe_thermal_sensor_data  thermal_sensor_data;
+	bool thermal_sensor_enabled;
+	struct ixgbe_dmac_config dmac_config;
+	bool set_lben;
+};
+
+struct ixgbe_phy_info {
+	struct ixgbe_phy_operations ops;
+	enum ixgbe_phy_type type;
+	u32 addr;
+	u32 id;
+	enum ixgbe_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum ixgbe_media_type media_type;
+	u32 phy_semaphore_mask;
+	bool reset_disable;
+	ixgbe_autoneg_advertised autoneg_advertised;
+	enum ixgbe_smart_speed smart_speed;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+	bool qsfp_shared_i2c_bus;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+	void (*init_params)(struct ixgbe_hw *hw);
+	s32  (*read)(struct ixgbe_hw *, u32 *, u16,  u16);
+	s32  (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32  (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16);
+	s32  (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32  (*check_for_msg)(struct ixgbe_hw *, u16);
+	s32  (*check_for_ack)(struct ixgbe_hw *, u16);
+	s32  (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+	struct ixgbe_mbx_operations ops;
+	struct ixgbe_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
+struct ixgbe_hw {
+	u8 IOMEM *hw_addr;
+	void *back;
+	struct ixgbe_mac_info mac;
+	struct ixgbe_addr_filter_info addr_ctrl;
+	struct ixgbe_fc_info fc;
+	struct ixgbe_phy_info phy;
+	struct ixgbe_eeprom_info eeprom;
+	struct ixgbe_bus_info bus;
+	struct ixgbe_mbx_info mbx;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	bool adapter_stopped;
+	int api_version;
+	bool force_full_reset;
+	bool allow_unsupported_sfp;
+	bool wol_enabled;
+};
+
+#define ixgbe_call_func(hw, func, params, error) \
+		(func != NULL) ? func params : error
+
+
+/* Error Codes */
+#define IXGBE_SUCCESS				0
+#define IXGBE_ERR_EEPROM			-1
+#define IXGBE_ERR_EEPROM_CHECKSUM		-2
+#define IXGBE_ERR_PHY				-3
+#define IXGBE_ERR_CONFIG			-4
+#define IXGBE_ERR_PARAM				-5
+#define IXGBE_ERR_MAC_TYPE			-6
+#define IXGBE_ERR_UNKNOWN_PHY			-7
+#define IXGBE_ERR_LINK_SETUP			-8
+#define IXGBE_ERR_ADAPTER_STOPPED		-9
+#define IXGBE_ERR_INVALID_MAC_ADDR		-10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED		-11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING	-12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS		-13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE		-14
+#define IXGBE_ERR_RESET_FAILED			-15
+#define IXGBE_ERR_SWFW_SYNC			-16
+#define IXGBE_ERR_PHY_ADDR_INVALID		-17
+#define IXGBE_ERR_I2C				-18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED		-19
+#define IXGBE_ERR_SFP_NOT_PRESENT		-20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT	-21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR		-22
+#define IXGBE_ERR_FDIR_REINIT_FAILED		-23
+#define IXGBE_ERR_EEPROM_VERSION		-24
+#define IXGBE_ERR_NO_SPACE			-25
+#define IXGBE_ERR_OVERTEMP			-26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED		-27
+#define IXGBE_ERR_FC_NOT_SUPPORTED		-28
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE	-30
+#define IXGBE_ERR_PBA_SECTION			-31
+#define IXGBE_ERR_INVALID_ARGUMENT		-32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND	-33
+#define IXGBE_ERR_OUT_OF_MEM			-34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED		-36
+#define IXGBE_ERR_EEPROM_PROTECTED_REGION	-37
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
+
+#define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF
+
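+/*
+ * Example (illustrative sketch): ixgbe_call_func() is meant to be paired
+ * with IXGBE_NOT_IMPLEMENTED so that an op is only invoked when its
+ * function pointer is populated, e.g.:
+ *
+ *	s32 ret = ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+ *				  IXGBE_NOT_IMPLEMENTED);
+ */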
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P == 0) ? (0x4010) : (0x8010))
+#define IXGBE_KRM_LINK_CTRL_1(P)	((P == 0) ? (0x420C) : (0x820C))
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P == 0) ? (0x4634) : (0x8634))
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P == 0) ? (0x4638) : (0x8638))
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P == 0) ? (0x4B00) : (0x8B00))
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P == 0) ? (0x4E00) : (0x8E00))
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P == 0) ? (0x5520) : (0x9520))
+#define IXGBE_KRM_RX_ANA_CTL(P)		((P == 0) ? (0x5A00) : (0x9A00))
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		(1 << 9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		(1 << 11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(0x7 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		(1 << 14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		(1 << 15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		(1 << 16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		(1 << 18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		(1 << 24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		(1 << 26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		(1 << 29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART		(1 << 31)
+
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN			(1 << 6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		(1 << 15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		(1 << 16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	(1 << 4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	(1 << 2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(0x3 << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	(1 << 1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	(1 << 2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN		(1 << 3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		(1 << 31)
+
+#define IXGBE_KX4_LINK_CNTL_1				0x4C
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX		(1 << 16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4		(1 << 17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX		(1 << 24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4		(1 << 25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE		(1 << 29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP	(1 << 30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART		(1 << 31)
+
+#define IXGBE_SB_IOSF_INDIRECT_CTRL	0x00011144
+#define IXGBE_SB_IOSF_INDIRECT_DATA	0x00011148
+
+#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT		0
+#define IXGBE_SB_IOSF_CTRL_ADDR_MASK		0xFF
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT	18
+#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK	\
+				(0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT	20
+#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK	\
+				(0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
+#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
+#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT		31
+#define IXGBE_SB_IOSF_CTRL_BUSY		(1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_TARGET_KR_PHY	0
+#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY	1
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS0	2
+#define IXGBE_SB_IOSF_TARGET_KX4_PCS1	3
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
new file mode 100644
index 0000000..e99b17d
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -0,0 +1,724 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+
+#include "ixgbe_api.h"
+#include "ixgbe_type.h"
+#include "ixgbe_vf.h"
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ *  ixgbe_init_ops_vf - Initialize the pointers for vf
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign the generic function pointers; adapter-specific code
+ *  can override any of them with its own function pointers.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+	/* MAC */
+	hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+	hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+	hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+	/* Cannot clear stats on VF */
+	hw->mac.ops.clear_hw_cntrs = NULL;
+	hw->mac.ops.get_media_type = NULL;
+	hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+	hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+	hw->mac.ops.get_bus_info = NULL;
+
+	/* Link */
+	hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+	hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+	hw->mac.ops.get_link_capabilities = NULL;
+
+	/* RAR, Multicast, VLAN */
+	hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+	hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+	hw->mac.ops.init_rx_addrs = NULL;
+	hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+	hw->mac.ops.enable_mc = NULL;
+	hw->mac.ops.disable_mc = NULL;
+	hw->mac.ops.clear_vfta = NULL;
+	hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+
+	hw->mac.max_tx_queues = 1;
+	hw->mac.max_rx_queues = 1;
+
+	hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+	return IXGBE_SUCCESS;
+}
+
+/* ixgbe_virt_clr_reg - Set register to default (power on) state.
+ *  @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+	int i;
+	u32 vfsrrctl;
+	u32 vfdca_rxctrl;
+	u32 vfdca_txctrl;
+
+	/* VFSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+	vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+	/* DCA_RXCTRL default value */
+	vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+		       IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+	/* DCA_TXCTRL default value */
+	vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+		       IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	for (i = 0; i < 7; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Prepares the hardware for Tx/Rx. For the VF this only clears the
+ *  adapter_stopped flag; bus info, counters, address tables and link/flow
+ *  control setup are owned by the PF.
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_hw_vf - virtual function hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware and then starting
+ *  the hardware
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+	s32 status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_hw_vf - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	DEBUGFUNC("ixgbevf_reset_hw_vf");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* reset the api version */
+	hw->api_version = ixgbe_mbox_api_10;
+
+	DEBUGOUT("Issuing a function level reset to MAC\n");
+
+	IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+	IXGBE_WRITE_FLUSH(hw);
+
+	msec_delay(50);
+
+	/* we cannot reset while the RSTI / RSTD bits are asserted */
+	while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+		timeout--;
+		usec_delay(5);
+	}
+
+	if (!timeout)
+		return IXGBE_ERR_RESET_FAILED;
+
+	/* Reset VF registers to initial values */
+	ixgbe_virt_clr_reg(hw);
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+	msgbuf[0] = IXGBE_VF_RESET;
+	mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+	msec_delay(10);
+
+	/*
+	 * set our "perm_addr" based on info provided by the PF;
+	 * also set up the mc_filter_type, which is piggybacked
+	 * on the mac address in word 3
+	 */
+	ret_val = mbx->ops.read_posted(hw, msgbuf,
+			IXGBE_VF_PERMADDR_MSG_LEN, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+
+	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+	u32 reg_val;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+	IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts, flush previous writes */
+	IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	for (i = 0; i < hw->mac.max_tx_queues; i++)
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+		reg_val &= ~IXGBE_RXDCTL_ENABLE;
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+	}
+	/* Clear packet split and pool config */
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	/* flush all queue disables */
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits from a multicast address that determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits from
+ *  incoming rx multicast addresses to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		DEBUGOUT("MC filter type param set incorrectly\n");
+		ASSERT(0);
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
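+
+/*
+ * Worked example (illustrative): with mc_filter_type 0 (bits [47:36]) and
+ * multicast address 01:00:5E:00:00:01, mc_addr[4] is 0x00 and mc_addr[5] is
+ * 0x01, so vector = (0x00 >> 4) | (0x01 << 4) = 0x010. The PF side then
+ * typically uses bits [11:5] of the vector as the MTA register index and
+ * bits [4:0] as the bit position within that register.
+ */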
+
+STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+					u32 *msg, u16 size)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+	s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+	if (!retval)
+		mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
+/**
+ *  ixgbe_set_rar_vf - set device MAC address
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set" or "pool" index
+ *  @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		     u32 enable_addr)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+	UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
+
+	memset(msgbuf, 0, 12);
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+		ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @next: caller supplied function to return next address in list
+ *
+ *  Updates the Multicast Table Array.
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				 u32 mc_addr_count, ixgbe_mc_addr_itr next,
+				 bool clear)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 vector;
+	u32 cnt, i;
+	u32 vmdq;
+
+	UNREFERENCED_1PARAMETER(clear);
+
+	DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+	/* Each entry in the list uses one 16-bit word.  We have 30
+	 * 16-bit words available in our HW msg buffer (minus 1 for the
+	 * msg type).  That's 30 hash values if we pack 'em right.  If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multicast
+	 * addresses except in large enterprise network environments.
+	 */
+
+	DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+	for (i = 0; i < cnt; i++) {
+		vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+		DEBUGOUT1("Hash value = 0x%03X\n", vector);
+		vector_list[i] = (u16)vector;
+	}
+
+	return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ *  ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vlan: 12 bit VLAN ID
+ *  @vind: unused by VF drivers
+ *  @vlan_on: if true then set bit, else clear bit
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+	UNREFERENCED_1PARAMETER(vind);
+
+	msgbuf[0] = IXGBE_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
+
+	if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+		return IXGBE_SUCCESS;
+
+	return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
+}
+
+/**
+ *  ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+	UNREFERENCED_1PARAMETER(hw);
+	return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ *  ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+	UNREFERENCED_1PARAMETER(hw);
+	return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ *  ixgbe_get_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+		mac_addr[i] = hw->mac.perm_addr[i];
+
+	return IXGBE_SUCCESS;
+}
+
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
+	 */
+	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+	if (addr)
+		memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+			ret_val = IXGBE_ERR_OUT_OF_MEM;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_setup_mac_link_vf - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			    bool autoneg_wait_to_complete)
+{
+	UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_vf - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			    bool *link_up, bool autoneg_wait_to_complete)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	s32 ret_val = IXGBE_SUCCESS;
+	u32 links_reg;
+	u32 in_msg = 0;
+	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+	/* If we were hit with a reset drop the link */
+	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (mac->type == ixgbe_mac_82599_vf) {
+		int i;
+
+		for (i = 0; i < 5; i++) {
+			usec_delay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(links_reg & IXGBE_LINKS_UP))
+				goto out;
+		}
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	}
+
+	/* if the read failed it could just be a mailbox collision; best to wait
+	 * until we are called again and not report an error
+	 */
+	if (mbx->ops.read(hw, &in_msg, 1, 0))
+		goto out;
+
+	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS; if it is also a NACK we must have lost CTS status */
+		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+			ret_val = -1;
+		goto out;
+	}
+
+	/* the PF is talking; if we timed out in the past we must reinit */
+	if (!mbx->timeout) {
+		ret_val = -1;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = false;
+
+out:
+	*link_up = !mac->get_link_status;
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+	int err;
+	u32 msg[3];
+
+	/* Negotiate the mailbox API version */
+	msg[0] = IXGBE_VF_API_NEGOTIATE;
+	msg[1] = api;
+	msg[2] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/* Store value and return 0 on success */
+		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+			hw->api_version = api;
+			return 0;
+		}
+
+		err = IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return err;
+}
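+
+/*
+ * Usage sketch (illustrative): a VF driver would normally try to negotiate
+ * the newest mailbox API it understands right after reset; if the PF NACKs
+ * the request, the call returns an error and hw->api_version keeps its
+ * previous (base 1.0) value.
+ *
+ *	if (ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11) != 0)
+ *		DEBUGOUT("PF does not support mailbox API 1.1, using 1.0\n");
+ */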
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUES;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+		/* in case of unknown state assume we cannot tag frames */
+		if (*num_tcs > hw->mac.max_rx_queues)
+			*num_tcs = 1;
+
+		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
+		/* default to queue 0 on out-of-bounds queue number */
+		if (*default_tc >= hw->mac.max_tx_queues)
+			*default_tc = 0;
+	}
+
+	return err;
+}
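+
+/*
+ * Usage sketch (illustrative): once mailbox API 1.1 has been negotiated the
+ * VF can ask the PF for its queue/TC layout. On older API versions the call
+ * returns 0 without touching the output parameters, so the caller should
+ * supply sane defaults:
+ *
+ *	unsigned int num_tcs = 1, default_tc = 0;
+ *
+ *	if (ixgbevf_get_queues(hw, &num_tcs, &default_tc) == 0)
+ *		DEBUGOUT2("VF: %u TCs, default TC %u\n", num_tcs, default_tc);
+ */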
+
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
new file mode 100644
index 0000000..ae7d58f
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -0,0 +1,140 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#define IXGBE_VF_IRQ_CLEAR_MASK	7
+#define IXGBE_VF_MAX_TX_QUEUES	8
+#define IXGBE_VF_MAX_RX_QUEUES	8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS	8
+
+#define IXGBE_VFCTRL		0x00000
+#define IXGBE_VFSTATUS		0x00008
+#define IXGBE_VFLINKS		0x00010
+#define IXGBE_VFFRTIMER		0x00048
+#define IXGBE_VFRXMEMWRAP	0x03190
+#define IXGBE_VTEICR		0x00100
+#define IXGBE_VTEICS		0x00104
+#define IXGBE_VTEIMS		0x00108
+#define IXGBE_VTEIMC		0x0010C
+#define IXGBE_VTEIAC		0x00110
+#define IXGBE_VTEIAM		0x00114
+#define IXGBE_VTEITR(x)		(0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)		(0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC	0x00140
+#define IXGBE_VTRSCINT(x)	(0x00180 + (4 * (x)))
+/* define IXGBE_VFPBACL  still says TBD in EAS */
+#define IXGBE_VFRDBAL(x)	(0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)	(0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)	(0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)		(0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)		(0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)	(0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)	(0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)	(0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE		0x00300
+#define IXGBE_VFTDBAL(x)	(0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)	(0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)	(0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)		(0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)		(0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)	(0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)	(0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)	(0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)	(0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)	(0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC		0x0101C
+#define IXGBE_VFGPTC		0x0201C
+#define IXGBE_VFGORC_LSB	0x01020
+#define IXGBE_VFGORC_MSB	0x01024
+#define IXGBE_VFGOTC_LSB	0x02020
+#define IXGBE_VFGOTC_MSB	0x02024
+#define IXGBE_VFMPRC		0x01034
+#define IXGBE_VFMRQC		0x3000
+#define IXGBE_VFRSSRK(x)	(0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x)	(0x3200 + ((x) * 4))
+
+
+struct ixgbevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			    bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			    bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+		     u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+				 u32 mc_addr_count, ixgbe_mc_addr_itr,
+				 bool clear);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc);
+
+#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
new file mode 100644
index 0000000..6e8835d
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -0,0 +1,1040 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES	128
+#define IXGBE_X540_MAX_RX_QUEUES	128
+#define IXGBE_X540_RAR_ENTRIES		128
+#define IXGBE_X540_MC_TBL_SIZE		128
+#define IXGBE_X540_VFT_TBL_SIZE		128
+#define IXGBE_X540_RX_PB_SIZE		384
+
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for X540.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_init_ops_X540");
+
+	ret_val = ixgbe_init_phy_ops_generic(hw);
+	ret_val = ixgbe_init_ops_generic(hw);
+
+
+	/* EEPROM */
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+	eeprom->ops.read = ixgbe_read_eerd_X540;
+	eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
+	eeprom->ops.write = ixgbe_write_eewr_X540;
+	eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
+	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
+	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
+
+	/* PHY */
+	phy->ops.init = ixgbe_init_phy_ops_generic;
+	phy->ops.reset = NULL;
+	if (!ixgbe_mng_present(hw))
+		phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
+
+	/* MAC */
+	mac->ops.reset_hw = ixgbe_reset_hw_X540;
+	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+	mac->ops.get_media_type = ixgbe_get_media_type_X540;
+	mac->ops.get_supported_physical_layer =
+				    ixgbe_get_supported_physical_layer_X540;
+	mac->ops.read_analog_reg8 = NULL;
+	mac->ops.write_analog_reg8 = NULL;
+	mac->ops.start_hw = ixgbe_start_hw_X540;
+	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
+	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+
+	/* RAR, Multicast, VLAN */
+	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
+	mac->rar_highwater = 1;
+	mac->ops.set_vfta = ixgbe_set_vfta_generic;
+	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
+	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
+
+	/* Link */
+	mac->ops.get_link_capabilities =
+				ixgbe_get_copper_link_capabilities_generic;
+	mac->ops.setup_link = ixgbe_setup_mac_link_X540;
+	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
+	mac->ops.check_link = ixgbe_check_mac_link_generic;
+
+
+	mac->mcft_size		= IXGBE_X540_MC_TBL_SIZE;
+	mac->vft_size		= IXGBE_X540_VFT_TBL_SIZE;
+	mac->num_rar_entries	= IXGBE_X540_RAR_ENTRIES;
+	mac->rx_pb_size		= IXGBE_X540_RX_PB_SIZE;
+	mac->max_rx_queues	= IXGBE_X540_MAX_RX_QUEUES;
+	mac->max_tx_queues	= IXGBE_X540_MAX_TX_QUEUES;
+	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
+
+	/*
+	 * FWSM register
+	 * ARC supported; valid only if manageability features are
+	 * enabled.
+	 */
+	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+				   IXGBE_FWSM_MODE_MASK) ? true : false;
+
+	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+	/* LEDs */
+	mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+	mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+	/* Manageability interface */
+	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
+
+	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: true when autoneg or autotry is enabled
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *autoneg)
+{
+	ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_media_type_X540 - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+	UNREFERENCED_1PARAMETER(hw);
+	return ixgbe_media_type_copper;
+}
+
+/**
+ *  ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+			      ixgbe_link_speed speed,
+			      bool autoneg_wait_to_complete)
+{
+	DEBUGFUNC("ixgbe_setup_mac_link_X540");
+	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
+}
+
+/**
+ *  ixgbe_reset_hw_X540 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, and perform a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 ctrl, i;
+
+	DEBUGFUNC("ixgbe_reset_hw_X540");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != IXGBE_SUCCESS)
+		goto reset_hw_out;
+
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+	ctrl = IXGBE_CTRL_RST;
+	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		usec_delay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST_MASK))
+			break;
+	}
+
+	if (ctrl & IXGBE_CTRL_RST_MASK) {
+		status = IXGBE_ERR_RESET_FAILED;
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Reset polling failed to complete.\n");
+	}
+	msec_delay(100);
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		goto mac_reset_top;
+	}
+
+	/* Set the Rx packet buffer size. */
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+	/* Store the permanent SAN mac address */
+	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+		/* Save the SAN MAC RAR index */
+		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+				   &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+	return status;
+}
+
+/**
+ *  ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function
+ *  and the generation 2 start_hw function.
+ *  Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_start_hw_X540");
+
+	ret_val = ixgbe_start_hw_generic(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u16 ext_ability = 0;
+
+	DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
+
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+	IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+	if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+	if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+	if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+	return physical_layer;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	DEBUGFUNC("ixgbe_init_eeprom_params_X540");
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->semaphore_delay = 10;
+		eeprom->type = ixgbe_flash;
+
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+				    IXGBE_EEC_SIZE_SHIFT);
+		eeprom->word_size = 1 << (eeprom_size +
+					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+			  eeprom->type, eeprom->word_size);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_read_eerd_X540");
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_read_eerd_generic(hw, offset, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+				u16 offset, u16 words, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_read_eerd_buffer_generic(hw, offset,
+							words, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_write_eewr_X540");
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_write_eewr_generic(hw, offset, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @words: number of words
+ *  @data: word(s) to write to the EEPROM
+ *
+ *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+				 u16 offset, u16 words, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_write_eewr_buffer_generic(hw, offset,
+							 words, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  This function does not use synchronization for EERD and EEWR. It can
+ *  be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+	u16 i, j;
+	u16 checksum = 0;
+	u16 length = 0;
+	u16 pointer = 0;
+	u16 word = 0;
+	u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
+	u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
+
+	/* Do not use hw->eeprom.ops.read because we do not want to take
+	 * the synchronization semaphores here. Instead use
+	 * ixgbe_read_eerd_generic
+	 */
+
+	DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
+
+	/* Include 0x0-0x3F in the checksum */
+	for (i = 0; i <= checksum_last_word; i++) {
+		if (ixgbe_read_eerd_generic(hw, i, &word)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+		if (i != IXGBE_EEPROM_CHECKSUM)
+			checksum += word;
+	}
+
+	/* Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+	 */
+	for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
+		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+			continue;
+
+		if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		/* Skip pointer section if the pointer is invalid. */
+		if (pointer == 0xFFFF || pointer == 0 ||
+		    pointer >= hw->eeprom.word_size)
+			continue;
+
+		if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
+			DEBUGOUT("EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		/* Skip pointer section if length is invalid. */
+		if (length == 0xFFFF || length == 0 ||
+		    (pointer + length) >= hw->eeprom.word_size)
+			continue;
+
+		for (j = pointer + 1; j <= pointer + length; j++) {
+			if (ixgbe_read_eerd_generic(hw, j, &word)) {
+				DEBUGOUT("EEPROM read failed\n");
+				return IXGBE_ERR_EEPROM;
+			}
+			checksum += word;
+		}
+	}
+
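+	/* Store the complement so that the covered words, including the
+	 * checksum word itself, sum to IXGBE_EEPROM_SUM.
+	 */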
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+	return (s32)checksum;
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+					u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		goto out;
+
+	checksum = (u16)(status & 0xffff);
+
+	/* Do not use hw->eeprom.ops.read because we do not want to take
+	 * the synchronization semaphores twice here.
+	 */
+	status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+					 &read_checksum);
+	if (status)
+		goto out;
+
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (read_checksum != checksum) {
+		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+			     "Invalid EEPROM checksum");
+		status = IXGBE_ERR_EEPROM_CHECKSUM;
+	}
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+	return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 checksum;
+
+	DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		goto out;
+
+	checksum = (u16)(status & 0xffff);
+
+	/* Do not use hw->eeprom.ops.write because we do not want to
+	 * take the synchronization semaphores twice here.
+	 */
+	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+	if (status)
+		goto out;
+
+	status = ixgbe_update_flash_X540(hw);
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+	return status;
+}
+
+/**
+ *  ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ *  @hw: pointer to hardware structure
+ *
+ *  Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ *  EEPROM from shadow RAM to the flash device.
+ **/
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+	u32 flup;
+	s32 status;
+
+	DEBUGFUNC("ixgbe_update_flash_X540");
+
+	status = ixgbe_poll_flash_update_done_X540(hw);
+	if (status == IXGBE_ERR_EEPROM) {
+		DEBUGOUT("Flash update time out\n");
+		goto out;
+	}
+
+	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+	status = ixgbe_poll_flash_update_done_X540(hw);
+	if (status == IXGBE_SUCCESS)
+		DEBUGOUT("Flash update complete\n");
+	else
+		DEBUGOUT("Flash update time out\n");
+
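+	/* Revision 0 X540 parts need a second flash update when the
+	 * EEC.SEC1VAL bit is set.
+	 */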
+	if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
+		flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		if (flup & IXGBE_EEC_SEC1VAL) {
+			flup |= IXGBE_EEC_FLUP;
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+		}
+
+		status = ixgbe_poll_flash_update_done_X540(hw);
+		if (status == IXGBE_SUCCESS)
+			DEBUGOUT("Flash update complete\n");
+		else
+			DEBUGOUT("Flash update time out\n");
+	}
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ *  @hw: pointer to hardware structure
+ *
+ *  Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ *  flash update is done.
+ **/
+STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 reg;
+	s32 status = IXGBE_ERR_EEPROM;
+
+	DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
+
+	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (reg & IXGBE_EEC_FLUDONE) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		msec_delay(5);
+	}
+
+	if (i == IXGBE_FLUDONE_ATTEMPTS)
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Flash update status polling timed out");
+
+	return status;
+}
+
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+	u32 esdp;
+
+	if (!hw->bus.lan_id)
+		return;
+	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	if (state)
+		esdp |= IXGBE_ESDP_SDP1;
+	else
+		esdp &= ~IXGBE_ESDP_SDP1;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ *  the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
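+	/* FW ownership bits sit 5 positions above the SW bits in SW_FW_SYNC */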
+	u32 fwmask = swmask << 5;
+	u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+	u32 timeout = 200;
+	u32 hwmask = 0;
+	u32 swfw_sync;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
+
+	if (swmask & IXGBE_GSSR_EEP_SM)
+		hwmask |= IXGBE_GSSR_FLASH_SM;
+
+	/* SW only mask doesn't have FW bit pair */
+	if (mask & IXGBE_GSSR_SW_MNG_SM)
+		swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+	swmask |= swi2c_mask;
+	fwmask |= swi2c_mask << 2;
+	for (i = 0; i < timeout; i++) {
+		/* SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
+		 */
+		if (ixgbe_get_swfw_sync_semaphore(hw))
+			return IXGBE_ERR_SWFW_SYNC;
+
+		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+			swfw_sync |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+			ixgbe_release_swfw_sync_semaphore(hw);
+			msec_delay(5);
+			if (swi2c_mask)
+				ixgbe_set_mux(hw, 1);
+			return IXGBE_SUCCESS;
+		}
+		/* Firmware currently using resource (fwmask), hardware
+		 * currently using resource (hwmask), or other software
+		 * thread currently using resource (swmask)
+		 */
+		ixgbe_release_swfw_sync_semaphore(hw);
+		msec_delay(5);
+	}
+
+	/* Failed to get SW only semaphore */
+	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Failed to get SW only semaphore");
+		return IXGBE_ERR_SWFW_SYNC;
+	}
+
+	/* If the resource is not released by the FW/HW the SW can assume that
+	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+	 * of the requested resource(s) while ignoring the corresponding FW/HW
+	 * bits in the SW_FW_SYNC register.
+	 */
+	if (ixgbe_get_swfw_sync_semaphore(hw))
+		return IXGBE_ERR_SWFW_SYNC;
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	if (swfw_sync & (fwmask | hwmask)) {
+		swfw_sync |= swmask;
+		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+		ixgbe_release_swfw_sync_semaphore(hw);
+		msec_delay(5);
+		if (swi2c_mask)
+			ixgbe_set_mux(hw, 1);
+		return IXGBE_SUCCESS;
+	}
+	/* If the resource is not released by other SW the SW can assume that
+	 * the other SW malfunctions. In that case the SW should clear all SW
+	 * flags that it does not own and then repeat the whole process once
+	 * again.
+	 */
+	if (swfw_sync & swmask) {
+		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+
+		if (swi2c_mask)
+			rmask |= IXGBE_GSSR_I2C_MASK;
+		ixgbe_release_swfw_sync_X540(hw, rmask);
+		ixgbe_release_swfw_sync_semaphore(hw);
+		return IXGBE_ERR_SWFW_SYNC;
+	}
+	ixgbe_release_swfw_sync_semaphore(hw);
+
+	return IXGBE_ERR_SWFW_SYNC;
+}
+
+/**
+ *  ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through the SW_FW_SYNC register
+ *  for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
+	u32 swfw_sync;
+
+	DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
+	if (mask & IXGBE_GSSR_I2C_MASK) {
+		swmask |= mask & IXGBE_GSSR_I2C_MASK;
+		ixgbe_set_mux(hw, 0);
+	}
+	ixgbe_get_swfw_sync_semaphore(hw);
+
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swfw_sync &= ~swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+	ixgbe_release_swfw_sync_semaphore(hw);
+	msec_delay(5);
+}
+
+/**
+ *  ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM;
+	u32 timeout = 2000;
+	u32 i;
+	u32 swsm;
+
+	DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = IXGBE_SUCCESS;
+			break;
+		}
+		usec_delay(50);
+	}
+
+	/* Now get the semaphore between SW/FW through the REGSMP bit */
+	if (status == IXGBE_SUCCESS) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+			if (!(swsm & IXGBE_SWFW_REGSMP))
+				break;
+
+			usec_delay(50);
+		}
+
+		/*
+		 * Release semaphores and return error if SW NVM semaphore
+		 * was not granted because we don't have access to the EEPROM
+		 */
+		if (i >= timeout) {
+			ERROR_REPORT1(IXGBE_ERROR_POLLING,
+				"REGSMP Software NVM semaphore not granted.\n");
+			ixgbe_release_swfw_sync_semaphore(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	} else {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Software semaphore SMBI between device drivers "
+			     "not granted.\n");
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
+
+	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+	swsm &= ~IXGBE_SWSM_SMBI;
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swsm &= ~IXGBE_SWFW_REGSMP;
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ *   X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+	u32 macc_reg;
+	u32 ledctl_reg;
+	ixgbe_link_speed speed;
+	bool link_up;
+
+	DEBUGFUNC("ixgbe_blink_led_start_X540");
+
+	/*
+	 * Link should be up in order for the blink bit in the LED control
+	 * register to work. Force link and speed in the MAC if link is down.
+	 * This will be reversed when we stop the blinking.
+	 */
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+	if (link_up == false) {
+		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+	}
+	/* Set the LED to LINK_UP + BLINK. */
+	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+	ledctl_reg |= IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ *   X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+	u32 macc_reg;
+	u32 ledctl_reg;
+
+	DEBUGFUNC("ixgbe_blink_led_stop_X540");
+
+	/* Restore the LED to its default value. */
+	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+	ledctl_reg &= ~IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+	/* Unforce link and speed in the MAC. */
+	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
new file mode 100644
index 0000000..bca1dff
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -0,0 +1,66 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed, bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			      bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+				u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
+				 u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+#endif /* _IXGBE_X540_H_ */
+
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
new file mode 100644
index 0000000..9572697
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -0,0 +1,2113 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_x550.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/**
+ *  ixgbe_init_ops_X550 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for X550.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_init_ops_X550");
+
+	ret_val = ixgbe_init_ops_X540(hw);
+	mac->ops.dmac_config = ixgbe_dmac_config_X550;
+	mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
+	mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
+	mac->ops.setup_eee = ixgbe_setup_eee_X550;
+	mac->ops.set_source_address_pruning =
+			ixgbe_set_source_address_pruning_X550;
+	mac->ops.set_ethertype_anti_spoofing =
+			ixgbe_set_ethertype_anti_spoofing_X550;
+
+	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+	eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+	eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+	eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+	eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+
+	mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
+	mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
+	mac->ops.mdd_event = ixgbe_mdd_event_X550;
+	mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
+	mac->ops.disable_rx = ixgbe_disable_rx_x550;
+	return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_x550em - Get PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+{
+	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_X_SFP:
+		/* set up for CS4227 usage */
+		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+		if (hw->bus.lan_id) {
+
+			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+			esdp |= IXGBE_ESDP_SDP1_DIR;
+		}
+		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+
+		return ixgbe_identify_module_generic(hw);
+		break;
+	case IXGBE_DEV_ID_X550EM_X_KX4:
+		hw->phy.type = ixgbe_phy_x550em_kx4;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_KR:
+		hw->phy.type = ixgbe_phy_x550em_kr;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_1G_T:
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
+		return ixgbe_identify_phy_generic(hw);
+	default:
+		break;
+	}
+	return IXGBE_SUCCESS;
+}
+
+STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+				     u32 device_type, u16 *phy_data)
+{
+	UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
+	return IXGBE_NOT_IMPLEMENTED;
+}
+
+STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+				      u32 device_type, u16 phy_data)
+{
+	UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
+	return IXGBE_NOT_IMPLEMENTED;
+}
+
+/**
+ *  ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for X550EM.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_init_ops_X550EM");
+
+	/* Similar to X550 so start there. */
+	ret_val = ixgbe_init_ops_X550(hw);
+
+	/* Since this function eventually calls
+	 * ixgbe_init_ops_X540 by design, we are setting
+	 * the pointers to NULL explicitly here to overwrite
+	 * the values being set in the x540 function.
+	 */
+	/* Thermal sensor not supported in x550EM */
+	mac->ops.get_thermal_sensor_data = NULL;
+	mac->ops.init_thermal_sensor_thresh = NULL;
+	mac->thermal_sensor_enabled = false;
+
+	/* FCOE not supported in x550EM */
+	mac->ops.get_san_mac_addr = NULL;
+	mac->ops.set_san_mac_addr = NULL;
+	mac->ops.get_wwn_prefix = NULL;
+	mac->ops.get_fcoe_boot_status = NULL;
+
+	/* IPsec not supported in x550EM */
+	mac->ops.disable_sec_rx_path = NULL;
+	mac->ops.enable_sec_rx_path = NULL;
+
+	/* X550EM bus type is internal */
+	hw->bus.type = ixgbe_bus_type_internal;
+	mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
+
+	mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+	mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+	mac->ops.get_media_type = ixgbe_get_media_type_X550em;
+	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
+	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
+	mac->ops.reset_hw = ixgbe_reset_hw_X550em;
+	mac->ops.get_supported_physical_layer =
+				    ixgbe_get_supported_physical_layer_X550em;
+
+	/* PHY */
+	phy->ops.init = ixgbe_init_phy_ops_X550em;
+	phy->ops.identify = ixgbe_identify_phy_x550em;
+	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+		phy->ops.set_phy_power = NULL;
+
+
+	/* EEPROM */
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
+	eeprom->ops.read = ixgbe_read_ee_hostif_X550;
+	eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
+	eeprom->ops.write = ixgbe_write_ee_hostif_X550;
+	eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
+	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
+	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_dmac_config_X550
+ *  @hw: pointer to hardware structure
+ *
+ *  Configure DMA coalescing. When enabling, DMA coalescing is activated.
+ *  When disabling, the DMAC enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
+{
+	u32 reg, high_pri_tc;
+
+	DEBUGFUNC("ixgbe_dmac_config_X550");
+
+	/* Disable DMA coalescing before configuring */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+	reg &= ~IXGBE_DMACR_DMAC_EN;
+	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+	/* Disable DMA Coalescing if the watchdog timer is 0 */
+	if (!hw->mac.dmac_config.watchdog_timer)
+		goto out;
+
+	ixgbe_dmac_config_tcs_X550(hw);
+
+	/* Configure DMA Coalescing Control Register */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+
+	/* Set the watchdog timer in units of 40.96 usec */
+	reg &= ~IXGBE_DMACR_DMACWT_MASK;
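+	/* (timer * 100) / 4096 is equivalent to dividing by 40.96 */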
+	reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
+
+	reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
+	/* If fcoe is enabled, set high priority traffic class */
+	if (hw->mac.dmac_config.fcoe_en) {
+		high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
+		reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
+			IXGBE_DMACR_HIGH_PRI_TC_MASK);
+	}
+	reg |= IXGBE_DMACR_EN_MNG_IND;
+
+	/* Enable DMA coalescing after configuration */
+	reg |= IXGBE_DMACR_DMAC_EN;
+	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+out:
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_dmac_config_tcs_X550
+ *  @hw: pointer to hardware structure
+ *
+ *  Configure DMA coalescing threshold per TC. The dmac enable bit must
+ *  be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
+{
+	u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
+
+	DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
+
+	/* Select the Rx packet buffer headroom based on link speed */
+	switch (hw->mac.dmac_config.link_speed) {
+	case IXGBE_LINK_SPEED_100_FULL:
+		pb_headroom = IXGBE_DMACRXT_100M;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		pb_headroom = IXGBE_DMACRXT_1G;
+		break;
+	default:
+		pb_headroom = IXGBE_DMACRXT_10G;
+		break;
+	}
+
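+	/* MAXFRS holds the max frame size in bytes; convert it to KB */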
+	maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
+			     IXGBE_MHADD_MFS_SHIFT) / 1024);
+
+	/* Set the per Rx packet buffer receive threshold */
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
+		reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
+
+		if (tc < hw->mac.dmac_config.num_tcs) {
+			/* Get Rx PB size */
+			rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
+			rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
+				IXGBE_RXPBSIZE_SHIFT;
+
+			/* Calculate receive buffer threshold in kilobytes */
+			if (rx_pb_size > pb_headroom)
+				rx_pb_size = rx_pb_size - pb_headroom;
+			else
+				rx_pb_size = 0;
+
+			/* The DMCTH threshold must be at least the max frame size (MFS) */
+			reg |= (rx_pb_size > maxframe_size_kb) ?
+				rx_pb_size : maxframe_size_kb;
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_dmac_update_tcs_X550
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
+
+	/* Disable DMA coalescing before configuring */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+	reg &= ~IXGBE_DMACR_DMAC_EN;
+	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+	ixgbe_dmac_config_tcs_X550(hw);
+
+	/* Enable DMA coalescing after configuration */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+	reg |= IXGBE_DMACR_DMAC_EN;
+	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	DEBUGFUNC("ixgbe_init_eeprom_params_X550");
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->semaphore_delay = 10;
+		eeprom->type = ixgbe_flash;
+
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+				    IXGBE_EEC_SIZE_SHIFT);
+		eeprom->word_size = 1 << (eeprom_size +
+					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+			  eeprom->type, eeprom->word_size);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_setup_eee_X550 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @enable_eee: boolean flag to enable EEE
+ *
+ *  Enable/disable EEE based on enable_eee flag.
+ *  Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ *  are modified.
+ *
+ **/
+s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
+{
+	u32 eeer;
+	u16 autoneg_eee_reg;
+	u32 link_reg;
+	s32 status;
+
+	DEBUGFUNC("ixgbe_setup_eee_X550");
+
+	eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
+	/* Enable or disable EEE per flag */
+	if (enable_eee) {
+		eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+		if (hw->device_id == IXGBE_DEV_ID_X550T) {
+			/* Advertise EEE capability */
+			hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+			autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+				IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+				IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+			hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+		} else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+			status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+			if (status != IXGBE_SUCCESS)
+				return status;
+
+			link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+				    IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+
+			status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+			if (status != IXGBE_SUCCESS)
+				return status;
+		}
+	} else {
+		eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
+
+		if (hw->device_id == IXGBE_DEV_ID_X550T) {
+			/* Disable advertised EEE capability */
+			hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
+
+			autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+				IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+				IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+			hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
+		} else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+			status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+			if (status != IXGBE_SUCCESS)
+				return status;
+
+			link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+				IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
+
+			status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+			if (status != IXGBE_SUCCESS)
+				return status;
+		}
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+					   unsigned int pool)
+{
+	u64 pfflp;
+
+	/* max rx pool is 63 */
+	if (pool > 63)
+		return;
+
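+	/* The per-pool enable bits span PFFLPL (pools 0-31) and PFFLPH (32-63) */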
+	pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+	pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+	if (enable)
+		pfflp |= (1ULL << pool);
+	else
+		pfflp &= ~(1ULL << pool);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ *  ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for Ethertype anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+		bool enable, int vf)
+{
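+	/* Each PFVFSPOOF register covers 8 VFs; the Ethertype anti-spoofing
+	 * bits start at IXGBE_SPOOF_ETHERTYPEAS_SHIFT within it.
+	 */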
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+	u32 pfvfspoof;
+
+	DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+	if (enable)
+		pfvfspoof |= (1 << vf_target_shift);
+	else
+		pfvfspoof &= ~(1 << vf_target_shift);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ *  ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
+ *  device
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 3 bit device type
+ *  @data: Data to write to the register
+ **/
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+			    u32 device_type, u32 data)
+{
+	u32 i, command, error;
+
+	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+	/* Write IOSF control register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+	/* Write IOSF data register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The SB IOSF BUSY bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+			      "Failed to write, error %x\n", error);
+		return IXGBE_ERR_PHY;
+	}
+
+	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the
+ *  IOSF device
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to read
+ *  @device_type: 3 bit device type
+ *  @data: pointer to the data read from the register
+ **/
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+			   u32 device_type, u32 *data)
+{
+	u32 i, command, error;
+
+	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+	/* Write IOSF control register */
+	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The SB IOSF BUSY bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+		ERROR_REPORT2(IXGBE_ERROR_POLLING,
+				"Failed to read, error %x\n", error);
+		return IXGBE_ERR_PHY;
+	}
+
+	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_disable_mdd_X550
+ *  @hw: pointer to hardware structure
+ *
+ *  Disable malicious driver detection
+ **/
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("ixgbe_disable_mdd_X550");
+
+	/* Disable MDD for TX DMA and interrupt */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+	/* Disable MDD for RX and interrupt */
+	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ *  ixgbe_enable_mdd_X550
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable malicious driver detection
+ **/
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("ixgbe_enable_mdd_X550");
+
+	/* Enable MDD for TX DMA and interrupt */
+	reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+
+	/* Enable MDD for RX and interrupt */
+	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+}
+
+/**
+ *  ixgbe_restore_mdd_vf_X550
+ *  @hw: pointer to hardware structure
+ *  @vf: vf index
+ *
+ *  Restore VF that was disabled during malicious driver detection event
+ **/
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
+{
+	u32 idx, reg, num_qs, start_q, bitmask;
+
+	DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
+
+	/* Map VF to queues */
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	switch (reg & IXGBE_MRQC_MRQE_MASK) {
+	case IXGBE_MRQC_VMDQRT8TCEN:
+		num_qs = 8;  /* 16 VFs / pools */
+		bitmask = 0x000000FF;
+		break;
+	case IXGBE_MRQC_VMDQRSS32EN:
+	case IXGBE_MRQC_VMDQRT4TCEN:
+		num_qs = 4;  /* 32 VFs / pools */
+		bitmask = 0x0000000F;
+		break;
+	default:            /* 64 VFs / pools */
+		num_qs = 2;
+		bitmask = 0x00000003;
+		break;
+	}
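+	/* Each pool owns num_qs consecutive queues starting at vf * num_qs */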
+	start_q = vf * num_qs;
+
+	/* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+	idx = start_q / 32;
+	reg = 0;
+	reg |= (bitmask << (start_q % 32));
+	IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
+	IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
+}
+
+/**
+ *  ixgbe_mdd_event_X550
+ *  @hw: pointer to hardware structure
+ *  @vf_bitmap: vf bitmap of malicious vfs
+ *
+ *  Handle malicious driver detection event.
+ **/
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+	u32 wqbr;
+	u32 i, j, reg, q, shift, vf, idx;
+
+	DEBUGFUNC("ixgbe_mdd_event_X550");
+
+	/* figure out pool size for mapping to vf's */
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	switch (reg & IXGBE_MRQC_MRQE_MASK) {
+	case IXGBE_MRQC_VMDQRT8TCEN:
+		shift = 3;  /* 16 VFs / pools */
+		break;
+	case IXGBE_MRQC_VMDQRSS32EN:
+	case IXGBE_MRQC_VMDQRT4TCEN:
+		shift = 2;  /* 32 VFs / pools */
+		break;
+	default:
+		shift = 1;  /* 64 VFs / pools */
+		break;
+	}
+
+	/* Read WQBR_TX and WQBR_RX and check for malicious queues */
+	for (i = 0; i < 4; i++) {
+		wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
+		wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+
+		if (!wqbr)
+			continue;
+
+		/* Get malicious queue */
+		for (j = 0; j < 32 && wqbr; j++) {
+
+			if (!(wqbr & (1 << j)))
+				continue;
+
+			/* Get queue from bitmask */
+			q = j + (i * 32);
+
+			/* Map queue to vf */
+			vf = (q >> shift);
+
+			/* Set vf bit in vf_bitmap */
+			idx = vf / 32;
+			vf_bitmap[idx] |= (1 << (vf % 32));
+			wqbr &= ~(1 << j);
+		}
+	}
+}
+
+/**
+ *  ixgbe_get_media_type_X550em - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ */
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	DEBUGFUNC("ixgbe_get_media_type_X550em");
+
+	/* Detect if there is a copper PHY attached. */
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_X_KR:
+	case IXGBE_DEV_ID_X550EM_X_KX4:
+		media_type = ixgbe_media_type_backplane;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_SFP:
+		media_type = ixgbe_media_type_fiber;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_1G_T:
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
+		media_type = ixgbe_media_type_copper;
+		break;
+	default:
+		media_type = ixgbe_media_type_unknown;
+		break;
+	}
+	return media_type;
+}
+
+/**
+ *  ixgbe_setup_sfp_modules_X550em - Setup SFP module
+ *  @hw: pointer to hardware structure
+ */
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+{
+	bool setup_linear;
+	u16 reg_slice, edc_mode;
+	s32 ret_val;
+
+	DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
+
+	switch (hw->phy.sfp_type) {
+	case ixgbe_sfp_type_unknown:
+		return IXGBE_SUCCESS;
+	case ixgbe_sfp_type_not_present:
+		return IXGBE_ERR_SFP_NOT_PRESENT;
+	case ixgbe_sfp_type_da_cu_core0:
+	case ixgbe_sfp_type_da_cu_core1:
+		setup_linear = true;
+		break;
+	case ixgbe_sfp_type_srlr_core0:
+	case ixgbe_sfp_type_srlr_core1:
+	case ixgbe_sfp_type_da_act_lmt_core0:
+	case ixgbe_sfp_type_da_act_lmt_core1:
+	case ixgbe_sfp_type_1g_sx_core0:
+	case ixgbe_sfp_type_1g_sx_core1:
+	case ixgbe_sfp_type_1g_lx_core0:
+	case ixgbe_sfp_type_1g_lx_core1:
+		setup_linear = false;
+		break;
+	default:
+		return IXGBE_ERR_SFP_NOT_SUPPORTED;
+	}
+
+	ixgbe_init_mac_link_ops_X550em(hw);
+	hw->phy.ops.reset = NULL;
+
+	/* The CS4227 slice address is the base address + the port-pair reg
+	 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
+	 */
+	reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
+
+	if (setup_linear)
+		edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+	else
+		edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+
+	/* Configure CS4227 for connection type. */
+	ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+					   edc_mode);
+
+	if (ret_val != IXGBE_SUCCESS)
+		ret_val = ixgbe_write_i2c_combined(hw, 0x80, reg_slice,
+						   edc_mode);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mac_link_ops_X550em - init mac link function pointers
+ *  @hw: pointer to hardware structure
+ */
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
+
+	/* CS4227 does not support autoneg, so disable the laser control
+	 * functions for SFP+ fiber
+	 */
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+		mac->ops.disable_tx_laser = NULL;
+		mac->ops.enable_tx_laser = NULL;
+		mac->ops.flap_tx_laser = NULL;
+	}
+}
+
+/**
+ *  ixgbe_get_link_capabilities_X550em - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: true when autoneg or autotry is enabled
+ */
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+				       ixgbe_link_speed *speed,
+				       bool *autoneg)
+{
+	DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+	/* SFP */
+	if (hw->phy.media_type == ixgbe_media_type_fiber) {
+
+		/* CS4227 SFP must not enable auto-negotiation */
+		*autoneg = false;
+
+		/* Check if 1G SFP module. */
+		if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+			*speed = IXGBE_LINK_SPEED_1GB_FULL;
+			return IXGBE_SUCCESS;
+		}
+
+		/* Link capabilities are based on SFP */
+		if (hw->phy.multispeed_fiber)
+			*speed = IXGBE_LINK_SPEED_10GB_FULL |
+				 IXGBE_LINK_SPEED_1GB_FULL;
+		else
+			*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	} else {
+		*speed = IXGBE_LINK_SPEED_10GB_FULL |
+			 IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ */
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+{
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 esdp;
+
+	DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+
+		if (hw->bus.lan_id) {
+			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+			esdp |= IXGBE_ESDP_SDP1_DIR;
+		}
+		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	}
+
+	/* Identify the PHY or SFP module */
+	ret_val = phy->ops.identify(hw);
+	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		return ret_val;
+
+	/* Setup function pointers based on detected SFP module and speeds */
+	ixgbe_init_mac_link_ops_X550em(hw);
+	if (phy->sfp_type != ixgbe_sfp_type_unknown)
+		phy->ops.reset = NULL;
+
+	/* Set functions pointers based on phy type */
+	switch (hw->phy.type) {
+	case ixgbe_phy_x550em_kx4:
+		phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+		phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+		phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+		break;
+	case ixgbe_phy_x550em_kr:
+		phy->ops.setup_link = ixgbe_setup_kr_x550em;
+		phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+		phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+		break;
+	case ixgbe_phy_x550em_ext_t:
+		phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+		break;
+	default:
+		break;
+	}
+	return ret_val;
+}
+
+/**
+ *  ixgbe_reset_hw_X550em - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ *  reset.
+ */
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+{
+	ixgbe_link_speed link_speed;
+	s32 status;
+	u32 ctrl = 0;
+	u32 i;
+	bool link_up = false;
+
+	DEBUGFUNC("ixgbe_reset_hw_X550em");
+
+	/* Call adapter stop to disable Tx/Rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);
+
+	/* PHY ops must be identified and initialized prior to reset */
+
+	/* Identify PHY and related function pointers */
+	status = hw->phy.ops.init(hw);
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		return status;
+
+	/* start the external PHY */
+	if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+		status = ixgbe_init_ext_t_x550em(hw);
+		if (status)
+			return status;
+	}
+
+	/* Setup SFP module if there is one present. */
+	if (hw->phy.sfp_setup_needed) {
+		status = hw->mac.ops.setup_sfp(hw);
+		hw->phy.sfp_setup_needed = false;
+	}
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		return status;
+
+	/* Reset PHY */
+	if (!hw->phy.reset_disable && hw->phy.ops.reset)
+		hw->phy.ops.reset(hw);
+
+mac_reset_top:
+	/* Issue global reset to the MAC.  Needs to be SW reset if link is up.
+	 * If link reset is used when link is up, it might reset the PHY when
+	 * mng is using it.  If link is down or the flag to force full link
+	 * reset is set, then perform link reset.
+	 */
+	ctrl = IXGBE_CTRL_LNK_RST;
+	if (!hw->force_full_reset) {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (link_up)
+			ctrl = IXGBE_CTRL_RST;
+	}
+
+	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear meaning reset is complete */
+	for (i = 0; i < 10; i++) {
+		usec_delay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST_MASK))
+			break;
+	}
+
+	if (ctrl & IXGBE_CTRL_RST_MASK) {
+		status = IXGBE_ERR_RESET_FAILED;
+		DEBUGOUT("Reset polling failed to complete.\n");
+	}
+
+	msec_delay(50);
+
+	/* Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to
+	 * allow time for any pending HW events to complete.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		goto mac_reset_top;
+	}
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/* Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ * @hw: pointer to hardware structure
+ */
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 reg;
+	u32 retries = 1;
+
+	/* TODO: The number of attempts and delay between attempts is undefined */
+	do {
+		/* decrement retries counter and exit if we hit 0 */
+		if (retries < 1) {
+			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+				      "External PHY not yet finished resetting.");
+			return IXGBE_ERR_PHY;
+		}
+		retries--;
+
+		usec_delay(0);
+
+		status = hw->phy.ops.read_reg(hw,
+					      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+					      &reg);
+
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* Verify PHY FW reset has completed */
+	} while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
+
+	/* Set port to low power mode */
+	status = hw->phy.ops.read_reg(hw,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+
+	status = hw->phy.ops.write_reg(hw,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Enable the transmitter */
+	status = hw->phy.ops.read_reg(hw,
+				      IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+
+	status = hw->phy.ops.write_reg(hw,
+				       IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+				       IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				       reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Un-stall the PHY FW */
+	status = hw->phy.ops.read_reg(hw,
+				      IXGBE_MDIO_GLOBAL_RES_PR_10,
+				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				      &reg);
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+
+	status = hw->phy.ops.write_reg(hw,
+				       IXGBE_MDIO_GLOBAL_RES_PR_10,
+				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				       reg);
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_kr_x550em - Configure the KR PHY.
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the integrated KR PHY.
+ **/
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 reg_val;
+
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status)
+		return status;
+
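+	/* Enable auto-negotiation with FEC requested and supported, then
+	 * clear the KR/KX ability bits so that only the speeds requested
+	 * below are advertised.
+	 */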
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+	/* Advertise 10G support. */
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+	/* Advertise 1G support. */
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+	/* Restart auto-negotiation. */
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the integrated KX4 PHY.
+ **/
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 reg_val;
+
+	status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, &reg_val);
+	if (status)
+		return status;
+
+	reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
+			IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
+
+	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
+
+	/* Advertise 10G support. */
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
+
+	/* Advertise 1G support. */
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
+
+	/* Restart auto-negotiation. */
+	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
+	status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
+		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, reg_val);
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ *  @hw: pointer to hardware structure
+ *  @speed: the link speed to force
+ *
+ *  Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ *  internal and external PHY at a specific speed, without autonegotiation.
+ **/
+STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+	s32 status;
+	u32 reg_val;
+
+	/* Disable AN and force speed to 10G Serial. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+	/* Select forced link speed for internal PHY. */
+	switch (*speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+		break;
+	default:
+		/* Other link speeds are not supported by internal KR PHY. */
+		return IXGBE_ERR_LINK_SETUP;
+	}
+
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Disable training protocol FSM. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Disable Flex from training TXFFE. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Enable override for coefficients. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Toggle port SW reset by AN reset. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	return status;
+}
+
+/**
+ * ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+ * @hw: pointer to hardware structure
+ *
+ * Configures the integrated KR PHY to talk to the external PHY. The base
+ * driver will call this function when it gets notification via interrupt from
+ * the external PHY. This function forces the internal PHY into iXFI mode at
+ * the correct speed.
+ *
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
+ */
+s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+	u32 status;
+	u16 lasi, autoneg_status, speed;
+	ixgbe_link_speed force_speed;
+
+	/* Verify that the external link status has changed */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
+				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				      &lasi);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* If there was no change in link status, we can just exit */
+	if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+		return IXGBE_SUCCESS;
+
+	/* The auto-negotiation status register latches link transitions, so
+	 * read it twice back to back; the second read reflects the current
+	 * status.
+	 */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_status);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_status);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* If link is not up, return an error so the link is treated as down */
+	if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &speed);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* clear everything but the speed and duplex bits */
+	speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+
+	switch (speed) {
+	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+		force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+		force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	default:
+		/* Internal PHY does not support anything else */
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
+	}
+
+	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+}
+
+/**
+ *  ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures the integrated KR PHY to use internal loopback mode.
+ **/
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u32 reg_val;
+
+	/* Disable AN and force speed to 10G Serial. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Set near-end loopback clocks. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
+	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Set loopback enable. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Training bypass. */
+	status = ixgbe_read_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+	if (status != IXGBE_SUCCESS)
+		return status;
+	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
+	status = ixgbe_write_iosf_sb_reg_x550(hw,
+		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+	return status;
+}
+
+/**
+ *  ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
+ *  command, assuming that the semaphore is already obtained.
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+				   u16 *data)
+{
+	s32 status;
+	struct ixgbe_hic_read_shadow_ram buffer;
+
+	DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
+	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+	buffer.hdr.req.buf_lenh = 0;
+	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+	/* convert offset from words to bytes */
+	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+	/* one word */
+	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+
+	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+					      sizeof(buffer),
+					      IXGBE_HI_COMMAND_TIMEOUT, false);
+
+	if (status)
+		return status;
+
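+	/* Extract the 16-bit word returned by firmware from the FLEX_MNG
+	 * response buffer.
+	 */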
+	*data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+					  FW_NVM_DATA_OFFSET);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+			      u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_read_ee_hostif_X550");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+				     u16 offset, u16 words, u16 *data)
+{
+	struct ixgbe_hic_read_shadow_ram buffer;
+	u32 current_word = 0;
+	u16 words_to_read;
+	s32 status;
+	u32 i;
+
+	DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
+
+	/* Take semaphore for the entire operation. */
+	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	if (status) {
+		DEBUGOUT("EEPROM read buffer - semaphore failed\n");
+		return status;
+	}
+	while (words) {
+		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
+			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
+		else
+			words_to_read = words;
+
+		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+		buffer.hdr.req.buf_lenh = 0;
+		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+		/* convert offset from words to bytes */
+		buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
+		buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
+
+		status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+						      sizeof(buffer),
+						      IXGBE_HI_COMMAND_TIMEOUT,
+						      false);
+
+		if (status) {
+			DEBUGOUT("Host interface command failed\n");
+			goto out;
+		}
+
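+		/* Each 32-bit register read returns two 16-bit EEPROM words:
+		 * store the low word first, then the high word if it is
+		 * still within the requested range.
+		 */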
+		for (i = 0; i < words_to_read; i++) {
+			u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
+				  2 * i;
+			u32 value = IXGBE_READ_REG(hw, reg);
+
+			data[current_word] = (u16)(value & 0xffff);
+			current_word++;
+			i++;
+			if (i < words_to_read) {
+				value >>= 16;
+				data[current_word] = (u16)(value & 0xffff);
+				current_word++;
+			}
+		}
+		words -= words_to_read;
+	}
+
+out:
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	return status;
+}
+
+/**
+ *  ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif,
+ *  assuming that the semaphore is already obtained.
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+				    u16 data)
+{
+	s32 status;
+	struct ixgbe_hic_write_shadow_ram buffer;
+
+	DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
+
+	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+	buffer.hdr.req.buf_lenh = 0;
+	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+	 /* one word */
+	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+	buffer.data = data;
+	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
+
+	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+					      sizeof(buffer),
+					      IXGBE_HI_COMMAND_TIMEOUT, false);
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @data: word to write to the EEPROM
+ *
+ *  Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+			       u16 data)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	DEBUGFUNC("ixgbe_write_ee_hostif_X550");
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+	    IXGBE_SUCCESS) {
+		status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
+		DEBUGOUT("write ee hostif failed to get semaphore");
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to write
+ *  @words: number of words
+ *  @data: word(s) to write to the EEPROM
+ *
+ *  Writes 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+				      u16 offset, u16 words, u16 *data)
+{
+	s32 status = IXGBE_SUCCESS;
+	u32 i = 0;
+
+	DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
+
+	/* Take semaphore for the entire operation. */
+	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	if (status != IXGBE_SUCCESS) {
+		DEBUGOUT("EEPROM write buffer - semaphore failed\n");
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
+							 data[i]);
+
+		if (status != IXGBE_SUCCESS) {
+			DEBUGOUT("Eeprom buffered write failed\n");
+			break;
+		}
+	}
+
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+
+	return status;
+}
+
+/**
+ * ixgbe_checksum_ptr_x550 - Checksum one pointer region
+ * @hw: pointer to hardware structure
+ * @ptr: pointer offset in eeprom
+ * @size: size of section pointed to by ptr; if 0, the first word is used
+ *        as the size
+ * @csum: address of checksum to update
+ * @buffer: pointer to a buffered copy of the EEPROM image, or NULL to read
+ *          the section directly from the EEPROM
+ * @buffer_size: size of @buffer in words
+ *
+ * Returns error status for any failure
+ */
+STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+				   u16 size, u16 *csum, u16 *buffer,
+				   u32 buffer_size)
+{
+	u16 buf[256];
+	s32 status;
+	u16 length, bufsz, i, start;
+	u16 *local_buffer;
+
+	bufsz = sizeof(buf) / sizeof(buf[0]);
+
+	/* Read a chunk at the pointer location */
+	if (!buffer) {
+		status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
+		if (status) {
+			DEBUGOUT("Failed to read EEPROM image\n");
+			return status;
+		}
+		local_buffer = buf;
+	} else {
+		if (buffer_size < ptr)
+			return  IXGBE_ERR_PARAM;
+		local_buffer = &buffer[ptr];
+	}
+
+	if (size) {
+		start = 0;
+		length = size;
+	} else {
+		start = 1;
+		length = local_buffer[0];
+
+		/* Skip pointer section if length is invalid. */
+		if (length == 0xFFFF || length == 0 ||
+		    (ptr + length) >= hw->eeprom.word_size)
+			return IXGBE_SUCCESS;
+	}
+
+	if (buffer && ((u32)start + (u32)length > buffer_size))
+		return IXGBE_ERR_PARAM;
+
+	for (i = start; length; i++, length--) {
+		if (i == bufsz && !buffer) {
+			ptr += bufsz;
+			i = 0;
+			if (length < bufsz)
+				bufsz = length;
+
+			/* Read a chunk at the pointer location */
+			status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
+								  bufsz, buf);
+			if (status) {
+				DEBUGOUT("Failed to read EEPROM image\n");
+				return status;
+			}
+		}
+		*csum += local_buffer[i];
+	}
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_calc_checksum_X550 - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *  @buffer: pointer to a buffered copy of the EEPROM image, or NULL to read
+ *           the image directly from the EEPROM
+ *  @buffer_size: size of @buffer in words
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
+{
+	u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+	u16 *local_buffer;
+	s32 status;
+	u16 checksum = 0;
+	u16 pointer, i, size;
+
+	DEBUGFUNC("ixgbe_calc_checksum_X550");
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (!buffer) {
+		/* Read pointer area */
+		status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
+						     IXGBE_EEPROM_LAST_WORD + 1,
+						     eeprom_ptrs);
+		if (status) {
+			DEBUGOUT("Failed to read EEPROM image\n");
+			return status;
+		}
+		local_buffer = eeprom_ptrs;
+	} else {
+		if (buffer_size < IXGBE_EEPROM_LAST_WORD)
+			return IXGBE_ERR_PARAM;
+		local_buffer = buffer;
+	}
+
+	/*
+	 * For X550 hardware include 0x0-0x41 in the checksum, skip the
+	 * checksum word itself
+	 */
+	for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
+		if (i != IXGBE_EEPROM_CHECKSUM)
+			checksum += local_buffer[i];
+
+	/*
+	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+	 */
+	for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
+		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+			continue;
+
+		pointer = local_buffer[i];
+
+		/* Skip pointer section if the pointer is invalid. */
+		if (pointer == 0xFFFF || pointer == 0 ||
+		    pointer >= hw->eeprom.word_size)
+			continue;
+
+		switch (i) {
+		case IXGBE_PCIE_GENERAL_PTR:
+			size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
+			break;
+		case IXGBE_PCIE_CONFIG0_PTR:
+		case IXGBE_PCIE_CONFIG1_PTR:
+			size = IXGBE_PCIE_CONFIG_SIZE;
+			break;
+		default:
+			size = 0;
+			break;
+		}
+
+		status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
+						buffer, buffer_size);
+		if (status)
+			return status;
+	}
+
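+	/* The checksum word is chosen so that the sum of all checksummed
+	 * words, including the checksum itself, equals IXGBE_EEPROM_SUM.
+	 */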
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+	return (s32)checksum;
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+	return ixgbe_calc_checksum_X550(hw, NULL, 0);
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+					   &read_checksum);
+	if (status)
+		return status;
+
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (read_checksum != checksum) {
+		status = IXGBE_ERR_EEPROM_CHECKSUM;
+		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+			     "Invalid EEPROM checksum");
+	}
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
+	return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 checksum = 0;
+
+	DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
+	if (status) {
+		DEBUGOUT("EEPROM read failed\n");
+		return status;
+	}
+
+	status = ixgbe_calc_eeprom_checksum_X550(hw);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
+					    checksum);
+	if (status)
+		return status;
+
+	status = ixgbe_update_flash_X550(hw);
+
+	return status;
+}
+
+/**
+ *  ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
+ *  @hw: pointer to hardware structure
+ *
+ *  Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
+ **/
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_SUCCESS;
+	union ixgbe_hic_hdr2 buffer;
+
+	DEBUGFUNC("ixgbe_update_flash_X550");
+
+	buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
+	buffer.req.buf_lenh = 0;
+	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
+	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
+
+	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+					      sizeof(buffer),
+					      IXGBE_HI_COMMAND_TIMEOUT, false);
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u16 ext_ability = 0;
+
+	DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
+
+	hw->phy.ops.identify(hw);
+
+	switch (hw->phy.type) {
+	case ixgbe_phy_x550em_kr:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		break;
+	case ixgbe_phy_x550em_kx4:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		break;
+	case ixgbe_phy_x550em_ext_t:
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+				     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				     &ext_ability);
+		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		break;
+	default:
+		break;
+	}
+
+	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
+		physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
+
+	return physical_layer;
+}
+
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+	DEBUGFUNC("ixgbe_get_bus_info_X550em");
+
+	hw->bus.width = ixgbe_bus_width_unknown;
+	hw->bus.speed = ixgbe_bus_speed_unknown;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disables the Rx unit for x550, preferring a firmware command and falling
+ * back to a direct register write if the command fails.
+ **/
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
+{
+	u32 rxctrl, pfdtxgswc;
+	s32 status;
+	struct ixgbe_hic_disable_rxen fw_cmd;
+
+	DEBUGFUNC("ixgbe_disable_rx_x550");
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (rxctrl & IXGBE_RXCTRL_RXEN) {
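+		/* Clear the VT loopback enable bit, recording its previous
+		 * state in set_lben so it can be restored later.
+		 */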
+		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+			hw->mac.set_lben = true;
+		} else {
+			hw->mac.set_lben = false;
+		}
+
+		fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
+		fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
+		fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+		fw_cmd.port_number = (u8)hw->bus.lan_id;
+
+		status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+					sizeof(struct ixgbe_hic_disable_rxen),
+					IXGBE_HI_COMMAND_TIMEOUT, true);
+
+		/* If we fail - disable RX using register write */
+		if (status) {
+			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+			if (rxctrl & IXGBE_RXCTRL_RXEN) {
+				rxctrl &= ~IXGBE_RXCTRL_RXEN;
+				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+			}
+		}
+	}
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
new file mode 100644
index 0000000..1aff9b3
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -0,0 +1,91 @@ 
+/*******************************************************************************
+
+Copyright (c) 2001-2014, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
+s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
+
+s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
+s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size);
+s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
+s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+				      u16 offset, u16 words, u16 *data);
+s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+			       u16 data);
+s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+				     u16 offset, u16 words, u16 *data);
+s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
+			      u16 *data);
+s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+				   u16 *data);
+s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+				    u16 data);
+s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
+s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+					   unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+					    bool enable, int vf);
+s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+				 u32 device_type, u32 data);
+s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u32 *data);
+void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
+void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
+void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf);
+enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+				       ixgbe_link_speed *speed, bool *autoneg);
+void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
+#endif /* _IXGBE_X550_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c
new file mode 100644
index 0000000..76d75fb
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -0,0 +1,314 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_82599.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_common.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_bypass_defines.h"
+#include "ixgbe_bypass.h"
+
+/**
+ *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  We set the module speed differently for fixed fiber.  Unlike the other
+ *  multi-speed paths there is no error value to return, so if we detect an
+ *  error we just log it and exit.
+ */
+static void
+ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
+{
+	s32 status;
+	u8 rs, eeprom_data;
+
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one bit mask same as setting on */
+		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid fixed module speed");
+		return;
+	}
+
+	/* Set RS0 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0");
+		goto out;
+	}
+
+	/* Set RS1 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1");
+		goto out;
+	}
+out:
+	return;
+}
+
+/**
+ *  ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32
+ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed,
+				     bool autoneg_wait_to_complete)
+{
+	s32 status = IXGBE_SUCCESS;
+	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	u32 speedcnt = 0;
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	u32 i = 0;
+	bool link_up = false;
+	bool negotiation;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Mask off requested but non-supported speeds */
+	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	speed &= link_speed;
+
+	/*
+	 * Try each speed one by one, highest priority first.  We do this in
+	 * software because 10gb fiber doesn't support speed autonegotiation.
+	 */
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+		speedcnt++;
+		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+			goto out;
+		/* Set the module link speed */
+		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
+
+		/* Set the module link speed */
+		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+		IXGBE_WRITE_FLUSH(hw);
+
+		/* Allow module to change analog characteristics (1G->10G) */
+		msec_delay(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+						    IXGBE_LINK_SPEED_10GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* Flap the tx laser if it has not already been done */
+		ixgbe_flap_tx_laser(hw);
+
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  82599 uses the same timing for 10g SFI.
+		 */
+		for (i = 0; i < 5; i++) {
+			/* Wait for the link partner to also set speed */
+			msec_delay(100);
+
+			/* If we have link, just jump out */
+			status = ixgbe_check_link(hw, &link_speed,
+						  &link_up, false);
+			if (status != IXGBE_SUCCESS)
+				return status;
+
+			if (link_up)
+				goto out;
+		}
+	}
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+		speedcnt++;
+		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+		/* If we already have link at this speed, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+			goto out;
+
+		/* Set the module link speed */
+		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
+
+		/* Allow module to change analog characteristics (10G->1G) */
+		msec_delay(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+						    IXGBE_LINK_SPEED_1GB_FULL,
+						    autoneg_wait_to_complete);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		/* Flap the tx laser if it has not already been done */
+		ixgbe_flap_tx_laser(hw);
+
+		/* Wait for the link partner to also set speed */
+		msec_delay(100);
+
+		/* If we have link, just jump out */
+		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		if (link_up)
+			goto out;
+	}
+
+	/*
+	 * We didn't get link.  Configure back to the highest speed we tried
+	 * (if there was more than one).  We call ourselves back with just the
+	 * single highest speed that the user requested.
+	 */
+	if (speedcnt > 1)
+		status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
+			highest_link_speed, autoneg_wait_to_complete);
+
+out:
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	return status;
+}
+
+static enum ixgbe_media_type
+ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+		media_type = ixgbe_media_type_fiber;
+	} else {
+		media_type = ixgbe_get_media_type_82599(hw);
+	}
+	return (media_type);
+}
+
+/*
+ * Wrapper around shared code (base driver) to support BYPASS NIC.
+ */
+s32
+ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw)
+{
+	s32 ret_val;
+
+	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+		hw->mac.type = ixgbe_mac_82599EB;
+	}
+
+	ret_val = ixgbe_init_shared_code(hw);
+	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+		hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+		ixgbe_init_mac_link_ops_82599(hw);
+	}
+
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
+{
+	int rc;
+
+	if ((rc = ixgbe_init_hw(hw)) == 0 &&
+			hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+
+		hw->mac.ops.setup_link =
+			&ixgbe_setup_mac_link_multispeed_fixed_fiber;
+
+		hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
+
+		hw->mac.ops.disable_tx_laser = NULL;
+		hw->mac.ops.enable_tx_laser = NULL;
+		hw->mac.ops.flap_tx_laser = NULL;
+	}
+
+	return (rc);
+}
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
new file mode 100644
index 0000000..832f415
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -0,0 +1,414 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <rte_atomic.h>
+#include <rte_ethdev.h>
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass_api.h"
+
+#define	BYPASS_STATUS_OFF_MASK	3
+
+/* Macros to check for invalid function pointers. */
+#define	FUNC_PTR_OR_ERR_RET(func, retval) do {              \
+	if ((func) == NULL) {                               \
+		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+			    __func__, __LINE__);            \
+		return retval;                            \
+	}                                                   \
+} while (0)
+
+#define	FUNC_PTR_OR_RET(func) do {                          \
+	if ((func) == NULL) {                               \
+		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
+			    __func__, __LINE__);            \
+		return;                                     \
+	}                                                   \
+} while (0)
+
+
+/**
+ *  ixgbe_bypass_set_time - Set bypass FW time epoch.
+ *
+ *  @adapter: pointer to ixgbe_adapter structure
+ *
+ *  This function will sync the FW date stamp with that of the
+ *  system clock.
+ **/
+static void
+ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
+{
+	u32 mask, value;
+	u32 sec;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	sec = 0;
+
+	/*
+	 * Send the FW our current time and turn on time_valid and
+	 * timer_reset bits.
+	 */
+	mask = BYPASS_CTL1_TIME_M |
+	       BYPASS_CTL1_VALID_M |
+	       BYPASS_CTL1_OFFTRST_M;
+	value = (sec & BYPASS_CTL1_TIME_M) |
+	        BYPASS_CTL1_VALID |
+		BYPASS_CTL1_OFFTRST;
+
+	FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
+
+	/* Store FW reset time (in seconds from epoch). */
+	adapter->bps.reset_tm = time(NULL);
+
+	/* reset FW timer. */
+	adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
+}
+
+/**
+ * ixgbe_bypass_init - Make some environment changes for bypass
+ *
+ * @dev: pointer to rte_eth_dev structure used to access the adapter state
+ *
+ * This function collects all the modifications needed by the bypass
+ * driver.
+ **/
+void
+ixgbe_bypass_init(struct rte_eth_dev *dev)
+{
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+
+	adapter = IXGBE_DEV_TO_ADPATER(dev);
+	hw = &adapter->hw;
+
+	/* Only allow BYPASS ops on the first port */
+	if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
+			hw->bus.func != 0) {
+		PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
+		return;
+	}
+
+	/* set bypass ops. */
+	adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
+	adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
+	adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
+	adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
+
+	/* set the time for logging. */
+	ixgbe_bypass_set_time(adapter);
+
+	/* Don't have the SDP to the laser */
+	hw->mac.ops.disable_tx_laser = NULL;
+	hw->mac.ops.enable_tx_laser = NULL;
+	hw->mac.ops.flap_tx_laser = NULL;
+}
+
+s32
+ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
+{
+	struct ixgbe_hw *hw;
+	s32 ret_val;
+	u32 cmd;
+	u32 by_ctl = 0;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+	cmd = BYPASS_PAGE_CTL0;
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+	/* Assume bypass_rw didn't error out; if it did, state will
+	 * be ignored anyway.
+	 */
+	*state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) & BYPASS_STATUS_OFF_MASK;
+
+	return (ret_val);
+}
+
+
+s32
+ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
+{
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+	struct ixgbe_hw *hw;
+	s32 ret_val;
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+	/* Set the new state */
+	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+					 BYPASS_MODE_OFF_M, *new_state);
+	if (ret_val)
+		goto exit;
+
+	/* Set AUTO back on so FW can receive events */
+	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+					 BYPASS_MODE_OFF_M, BYPASS_AUTO);
+
+exit:
+	return ret_val;
+
+}
+
+s32
+ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
+			    u32 *state)
+{
+	struct ixgbe_hw *hw;
+	s32 ret_val;
+	u32 shift;
+	u32 cmd;
+	u32 by_ctl = 0;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+	cmd = BYPASS_PAGE_CTL0;
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+	/* Assume bypass_rw didn't error out; if it did, event will
+	 * be ignored anyway.
+	 */
+	switch (event) {
+	case BYPASS_EVENT_WDT_TO:
+		shift = BYPASS_WDTIMEOUT_SHIFT;
+		break;
+	case BYPASS_EVENT_MAIN_ON:
+		shift = BYPASS_MAIN_ON_SHIFT;
+		break;
+	case BYPASS_EVENT_MAIN_OFF:
+		shift = BYPASS_MAIN_OFF_SHIFT;
+		break;
+	case BYPASS_EVENT_AUX_ON:
+		shift = BYPASS_AUX_ON_SHIFT;
+		break;
+	case BYPASS_EVENT_AUX_OFF:
+		shift = BYPASS_AUX_OFF_SHIFT;
+		break;
+	default:
+		return EINVAL;
+	}
+
+	*state = (by_ctl >> shift) & 0x3;
+
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
+			     u32 state)
+{
+	struct ixgbe_hw *hw;
+	u32 status;
+	u32 off;
+	s32 ret_val;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+	switch (event) {
+	case BYPASS_EVENT_WDT_TO:
+		off = BYPASS_WDTIMEOUT_M;
+		status = state << BYPASS_WDTIMEOUT_SHIFT;
+		break;
+	case BYPASS_EVENT_MAIN_ON:
+		off = BYPASS_MAIN_ON_M;
+		status = state << BYPASS_MAIN_ON_SHIFT;
+		break;
+	case BYPASS_EVENT_MAIN_OFF:
+		off = BYPASS_MAIN_OFF_M;
+		status = state << BYPASS_MAIN_OFF_SHIFT;
+		break;
+	case BYPASS_EVENT_AUX_ON:
+		off = BYPASS_AUX_ON_M;
+		status = state << BYPASS_AUX_ON_SHIFT;
+		break;
+	case BYPASS_EVENT_AUX_OFF:
+		off = BYPASS_AUX_OFF_M;
+		status = state << BYPASS_AUX_OFF_SHIFT;
+		break;
+	default:
+		return EINVAL;
+	}
+
+	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+		off, status);
+
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
+{
+	struct ixgbe_hw *hw;
+	u32 status;
+	u32 mask;
+	s32 ret_val;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
+
+	/* disable the timer with timeout of zero */
+	if (timeout == RTE_BYPASS_TMT_OFF) {
+		status = 0x0;   /* WDG enable off */
+		mask = BYPASS_WDT_ENABLE_M;
+	} else {
+		/* set time out value */
+		mask = BYPASS_WDT_VALUE_M;
+
+		/* enable the timer */
+		status = timeout << BYPASS_WDT_TIME_SHIFT;
+		status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
+		mask |= BYPASS_WDT_ENABLE_M;
+	}
+
+	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
+		mask, status);
+
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
+{
+	struct ixgbe_hw *hw;
+	u32 cmd;
+	u32 status;
+	s32 ret_val;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+	cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
+	       BYPASS_CTL2_OFFSET_M;
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+	if (ret_val)
+		goto exit;
+
+	/* wait for the write to stick */
+	msleep(100);
+
+	/* Now read the results */
+	cmd &= ~BYPASS_WE;
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+	if (ret_val)
+		goto exit;
+
+	*ver = status & BYPASS_CTL2_DATA_M;      /* only one byte of data */
+
+exit:
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
+{
+	struct ixgbe_hw *hw;
+	u32 by_ctl = 0;
+	u32 cmd;
+	u32 wdg;
+	s32 ret_val;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+
+	cmd = BYPASS_PAGE_CTL0;
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
+
+	wdg = by_ctl & BYPASS_WDT_ENABLE_M;
+	if (!wdg)
+		*wd_timeout = RTE_BYPASS_TMT_OFF;
+	else
+		*wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
+			BYPASS_WDT_MASK;
+
+	return ret_val;
+}
+
+s32
+ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
+{
+	u32 cmd;
+	u32 status;
+	u32 sec;
+	u32 count = 0;
+	s32 ret_val;
+	struct ixgbe_hw *hw;
+	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
+
+	hw = &adapter->hw;
+
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
+	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);
+
+	/* Use the lower level bit-bang functions since we don't need
+	 * to read the register first to get its current state as we
+	 * are setting everything in this write.
+	 */
+	/* Set up WD pet */
+	cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
+
+	/* Resync the FW time while writing to CTL1 anyway */
+	adapter->bps.reset_tm = time(NULL);
+	sec = 0;
+
+	cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
+
+	/* reset FW timer offset since we are resetting the clock */
+	cmd |= BYPASS_CTL1_OFFTRST;
+
+	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
+
+	/* Read until it matches what we wrote, or we time out */
+	do {
+		if (count++ > 10) {
+			ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
+			break;
+		}
+
+		if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
+			ret_val = IXGBE_ERR_INVALID_ARGUMENT;
+			break;
+		}
+	} while (!adapter->bps.ops.bypass_valid_rd(cmd, status));
+
+	return ret_val;
+}
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
new file mode 100644
index 0000000..fcd9774
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -0,0 +1,68 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_H_
+#define _IXGBE_BYPASS_H_
+
+#ifdef RTE_NIC_BYPASS
+
+struct ixgbe_bypass_mac_ops {
+	s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
+	bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
+	s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+	s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+};
+
+struct ixgbe_bypass_info {
+	uint64_t reset_tm;
+	struct ixgbe_bypass_mac_ops ops;
+};
+
+struct rte_eth_dev;
+
+void ixgbe_bypass_init(struct rte_eth_dev *dev);
+s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state);
+s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state);
+s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state);
+s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state);
+s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout);
+s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver);
+s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout);
+s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
+
+s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
+s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /*  _IXGBE_BYPASS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h
new file mode 100644
index 0000000..b4a7386
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_bypass_api.h
@@ -0,0 +1,299 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_API_H_
+#define _IXGBE_BYPASS_API_H_
+
+#ifdef RTE_NIC_BYPASS
+
+#include "ixgbe_bypass_defines.h"
+/**
+ *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
+ *
+ *  @hw: pointer to hardware structure
+ *  @cmd: Command we send to the FW
+ *  @status: The reply from the FW
+ *
+ *  Bit-bangs the cmd to the by_pass FW; status points to what is returned.
+ **/
+#define IXGBE_BYPASS_BB_WAIT 1
+static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
+{
+	int i;
+	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
+	u32 esdp;
+
+	if (!status)
+		return IXGBE_ERR_PARAM;
+
+	*status = 0;
+
+	/* SDPs vary by MAC type */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		sck = IXGBE_ESDP_SDP7;
+		sdi = IXGBE_ESDP_SDP0;
+		sdo = IXGBE_ESDP_SDP6;
+		dir_sck = IXGBE_ESDP_SDP7_DIR;
+		dir_sdi = IXGBE_ESDP_SDP0_DIR;
+		dir_sdo = IXGBE_ESDP_SDP6_DIR;
+		break;
+	case ixgbe_mac_X540:
+		sck = IXGBE_ESDP_SDP2;
+		sdi = IXGBE_ESDP_SDP0;
+		sdo = IXGBE_ESDP_SDP1;
+		dir_sck = IXGBE_ESDP_SDP2_DIR;
+		dir_sdi = IXGBE_ESDP_SDP0_DIR;
+		dir_sdo = IXGBE_ESDP_SDP1_DIR;
+		break;
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		sck = IXGBE_ESDP_SDP2;
+		sdi = IXGBE_ESDP_SDP0;
+		sdo = IXGBE_ESDP_SDP1;
+		dir_sck = IXGBE_ESDP_SDP2_DIR;
+		dir_sdi = IXGBE_ESDP_SDP0_DIR;
+		dir_sdo = IXGBE_ESDP_SDP1_DIR;
+		break;
+	default:
+		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	/* Set SDP pins direction */
+	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	esdp |= dir_sck;	/* SCK as output */
+	esdp |= dir_sdi;	/* SDI as output */
+	esdp &= ~dir_sdo;	/* SDO as input */
+	esdp |= sck;
+	esdp |= sdi;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+  //  TODO:
+	msleep(IXGBE_BYPASS_BB_WAIT);
+
+	/* Generate start condition */
+	esdp &= ~sdi;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+	msleep(IXGBE_BYPASS_BB_WAIT);
+
+	esdp &= ~sck;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+	msleep(IXGBE_BYPASS_BB_WAIT);
+
+	/* Clock out the new control word and clock in the status */
+	for (i = 0; i < 32; i++) {
+		if ((cmd >> (31 - i)) & 0x01) {
+			esdp |= sdi;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		} else {
+			esdp &= ~sdi;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		}
+		IXGBE_WRITE_FLUSH(hw);
+		msleep(IXGBE_BYPASS_BB_WAIT);
+
+		esdp |= sck;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+		msleep(IXGBE_BYPASS_BB_WAIT);
+
+		esdp &= ~sck;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+		msleep(IXGBE_BYPASS_BB_WAIT);
+
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		if (esdp & sdo)
+			*status = (*status << 1) | 0x01;
+		else
+			*status = (*status << 1) | 0x00;
+		msleep(IXGBE_BYPASS_BB_WAIT);
+	}
+
+	/* stop condition */
+	esdp |= sck;
+	esdp &= ~sdi;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+	msleep(IXGBE_BYPASS_BB_WAIT);
+
+	esdp |= sdi;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* set the page bits to match the cmd that the status belongs to */
+	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
+
+	return 0;
+}
+
+/**
+ * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+ *
+ * If we send a write we can't be sure it took until we can read back
+ * that same register.  It can be a problem as some of the fields may,
+ * for valid reasons, change between the time we wrote the register and
+ * the time we read it again to verify.  So this function checks everything
+ * we can check and then assumes it worked.
+ *
+ * @u32 in_reg - The register cmd for the bit-bang read.
+ * @u32 out_reg - The register returned from a bit-bang read.
+ **/
+static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
+{
+	u32 mask;
+
+	/* Page must match for all control pages */
+	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
+		return false;
+
+	switch (in_reg & BYPASS_PAGE_M) {
+	case BYPASS_PAGE_CTL0:
+		/* All the following can't change since the last write
+		 *  - All the event actions
+		 *  - The timeout value
+		 */
+		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
+		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
+		       BYPASS_WDTIMEOUT_M |
+		       BYPASS_WDT_VALUE_M;
+		if ((out_reg & mask) != (in_reg & mask))
+			return false;
+
+		/* 0x0 is never a valid value for bypass status */
+		if (!(out_reg & BYPASS_STATUS_OFF_M))
+			return false;
+		break;
+	case BYPASS_PAGE_CTL1:
+		/* All the following can't change since the last write
+		 *  - time valid bit
+		 *  - time we last sent
+		 */
+		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
+		if ((out_reg & mask) != (in_reg & mask))
+			return false;
+		break;
+	case BYPASS_PAGE_CTL2:
+		/* All we can check in this page is control number
+		 * which is already done above.
+		 */
+		break;
+	}
+
+	/* We are as sure as we can be, so return true */
+	return true;
+}
+
+/**
+ *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
+ *
+ *  @hw: pointer to hardware structure
+ *  @ctrl: The control word we are setting.
+ *  @event: The event we are setting in the FW.  This also happens to
+ *	    be the mask for the event we are setting (handy)
+ *  @action: The action we set the event to in the FW. This is in a
+ *	     bit field that happens to be what we want to put in
+ *	     the event spot (also handy)
+ **/
+static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
+			     u32 action)
+{
+	u32 by_ctl = 0;
+	u32 cmd, verify;
+	u32 count = 0;
+
+	/* Get current values */
+	cmd = ctrl;	/* just reading, we only need the control number */
+	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+		return IXGBE_ERR_INVALID_ARGUMENT;
+
+	/* Set to new action */
+	cmd = (by_ctl & ~event) | BYPASS_WE | action;
+	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
+		return IXGBE_ERR_INVALID_ARGUMENT;
+
+	/* Page 0 forces a FW EEPROM write, which is slow, so verify it */
+	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
+		verify = BYPASS_PAGE_CTL0;
+		do {
+			if (count++ > 5)
+				return IXGBE_BYPASS_FW_WRITE_FAILURE;
+
+			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
+				return IXGBE_ERR_INVALID_ARGUMENT;
+		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
+	} else {
+		/* We have to give the FW time for the write to stick */
+		msleep(100);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
+ *
+ *  @hw: pointer to hardware structure
+ *  @addr: The bypass eeprom address to read.
+ *  @value: The 8 bits of data at the address above.
+ **/
+static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
+{
+	u32 cmd;
+	u32 status;
+
+	/* send the request */
+	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
+	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
+	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+		return IXGBE_ERR_INVALID_ARGUMENT;
+
+	/* We have to give the FW time for the write to stick */
+	msleep(100);
+
+	/* now read the results */
+	cmd &= ~BYPASS_WE;
+	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
+		return IXGBE_ERR_INVALID_ARGUMENT;
+
+	*value = status & BYPASS_CTL2_DATA_M;
+
+	return 0;
+}
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
new file mode 100644
index 0000000..22570ac
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -0,0 +1,160 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_BYPASS_DEFINES_H_
+#define _IXGBE_BYPASS_DEFINES_H_
+
+#ifdef RTE_NIC_BYPASS
+
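+/*
+ * Map the Linux-style delay helpers used by the shared bypass code onto the
+ * DPDK delay primitive (rte_delay_us() takes microseconds).
+ */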
+#define msleep(x)             rte_delay_us(x*1000)
+#define usleep_range(min, max) rte_delay_us(min)
+
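+/*
+ * Layout of the 32-bit bypass command word used by the bit-bang helpers:
+ * the top two bits select the control page (BYPASS_PAGE_M), bit 29 is the
+ * write-enable flag (BYPASS_WE), and the remaining bits carry the
+ * page-specific fields defined below.
+ */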
+#define BYPASS_PAGE_CTL0	0x00000000
+#define BYPASS_PAGE_CTL1	0x40000000
+#define BYPASS_PAGE_CTL2	0x80000000
+#define BYPASS_PAGE_M		0xc0000000
+#define BYPASS_WE		0x20000000
+
+#define BYPASS_AUTO	0x0
+#define BYPASS_NOP	0x0
+#define BYPASS_NORM	0x1
+#define BYPASS_BYPASS	0x2
+#define BYPASS_ISOLATE	0x3
+
+#define BYPASS_EVENT_MAIN_ON	0x1
+#define BYPASS_EVENT_AUX_ON	0x2
+#define BYPASS_EVENT_MAIN_OFF	0x3
+#define BYPASS_EVENT_AUX_OFF	0x4
+#define BYPASS_EVENT_WDT_TO	0x5
+#define BYPASS_EVENT_USR	0x6
+
+#define BYPASS_MODE_OFF_M	0x00000003
+#define BYPASS_STATUS_OFF_M	0x0000000c
+#define BYPASS_AUX_ON_M		0x00000030
+#define BYPASS_MAIN_ON_M	0x000000c0
+#define BYPASS_MAIN_OFF_M	0x00000300
+#define BYPASS_AUX_OFF_M	0x00000c00
+#define BYPASS_WDTIMEOUT_M	0x00003000
+#define BYPASS_WDT_ENABLE_M	0x00004000
+#define BYPASS_WDT_VALUE_M	0x00070000
+
+#define BYPASS_MODE_OFF_SHIFT	0
+#define BYPASS_STATUS_OFF_SHIFT	2
+#define BYPASS_AUX_ON_SHIFT	4
+#define BYPASS_MAIN_ON_SHIFT	6
+#define BYPASS_MAIN_OFF_SHIFT	8
+#define BYPASS_AUX_OFF_SHIFT	10
+#define BYPASS_WDTIMEOUT_SHIFT	12
+#define BYPASS_WDT_ENABLE_SHIFT	14
+#define BYPASS_WDT_TIME_SHIFT	16
+
+#define BYPASS_WDT_1	0x0
+#define BYPASS_WDT_1_5	0x1
+#define BYPASS_WDT_2	0x2
+#define BYPASS_WDT_3	0x3
+#define BYPASS_WDT_4	0x4
+#define BYPASS_WDT_8	0x5
+#define BYPASS_WDT_16	0x6
+#define BYPASS_WDT_32	0x7
+#define BYPASS_WDT_OFF	0xffff
+
+#define BYPASS_WDT_MASK	0x7
+
+#define BYPASS_CTL1_TIME_M	0x01ffffff
+#define BYPASS_CTL1_VALID_M	0x02000000
+#define BYPASS_CTL1_OFFTRST_M	0x04000000
+#define BYPASS_CTL1_WDT_PET_M	0x08000000
+
+#define BYPASS_CTL1_VALID	0x02000000
+#define BYPASS_CTL1_OFFTRST	0x04000000
+#define BYPASS_CTL1_WDT_PET	0x08000000
+
+#define BYPASS_CTL2_DATA_M	0x000000ff
+#define BYPASS_CTL2_OFFSET_M	0x0000ff00
+#define BYPASS_CTL2_RW_M	0x00010000
+#define BYPASS_CTL2_HEAD_M	0x0ff00000
+
+#define BYPASS_CTL2_OFFSET_SHIFT	8
+#define BYPASS_CTL2_HEAD_SHIFT		20
+
+#define BYPASS_CTL2_RW		0x00010000
+
+enum ixgbe_state_t {
+	__IXGBE_TESTING,
+	__IXGBE_RESETTING,
+	__IXGBE_DOWN,
+	__IXGBE_SERVICE_SCHED,
+	__IXGBE_IN_SFP_INIT,
+	__IXGBE_IN_BYPASS_LOW,
+	__IXGBE_IN_BYPASS_HIGH,
+	__IXGBE_IN_BYPASS_LOG,
+};
+
+#define BYPASS_MAX_LOGS		43
+#define BYPASS_LOG_SIZE		5
+#define BYPASS_LOG_LINE_SIZE	37
+
+#define BYPASS_EEPROM_VER_ADD	0x02
+
+#define BYPASS_LOG_TIME_M	0x01ffffff
+#define BYPASS_LOG_TIME_VALID_M	0x02000000
+#define BYPASS_LOG_HEAD_M	0x04000000
+#define BYPASS_LOG_CLEAR_M	0x08000000
+#define BYPASS_LOG_EVENT_M	0xf0000000
+#define BYPASS_LOG_ACTION_M	0x03
+
+#define BYPASS_LOG_EVENT_SHIFT	28
+#define BYPASS_LOG_CLEAR_SHIFT	24 /* bit offset */
+#define IXGBE_DEV_TO_ADPATER(dev) \
+	((struct ixgbe_adapter*)(dev->data->dev_private))
+
+/* extractions from ixgbe_phy.h */
+#define	IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
+
+#define IXGBE_SFF_SFF_8472_SWAP		0x5C
+#define IXGBE_SFF_SFF_8472_COMP		0x5E
+#define IXGBE_SFF_SFF_8472_OSCB		0x6E
+#define IXGBE_SFF_SFF_8472_ESCB		0x76
+
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G	0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G	0x0
+
+/* extractions from ixgbe_type.h */
+#define IXGBE_DEV_ID_82599_BYPASS	0x155D
+
+#define IXGBE_BYPASS_FW_WRITE_FAILURE	-35
+
+#endif /* RTE_NIC_BYPASS */
+
+#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
new file mode 100644
index 0000000..0d9f9b2
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -0,0 +1,4453 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+
+/*
+ * High threshold controlling when to start sending XOFF frames. Must be at
+ * least 8 bytes less than receive packet buffer size. This value is in units
+ * of 1024 bytes.
+ */
+#define IXGBE_FC_HI    0x80
+
+/*
+ * Low threshold controlling when to start sending XON frames. This value is
+ * in units of 1024 bytes.
+ */
+#define IXGBE_FC_LO    0x40
+
+/* Timer value included in XOFF frames. */
+#define IXGBE_FC_PAUSE 0x680
+
+#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
+#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
+
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
+
+/*
+ *  Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH  32
+#define IXGBE_DEFAULT_RX_PTHRESH      8
+#define IXGBE_DEFAULT_RX_HTHRESH      8
+#define IXGBE_DEFAULT_RX_WTHRESH      0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH  32
+#define IXGBE_DEFAULT_TX_PTHRESH      32
+#define IXGBE_DEFAULT_TX_HTHRESH      0
+#define IXGBE_DEFAULT_TX_WTHRESH      0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH  CHAR_BIT
+#define IXGBE_8_BIT_MASK   UINT8_MAX
+
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
+
+#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
+
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
+static int  ixgbe_dev_start(struct rte_eth_dev *dev);
+static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
+static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
+				int wait_to_complete);
+static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+				struct rte_eth_stats *stats);
+static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+					     uint16_t queue_id,
+					     uint8_t stat_idx,
+					     uint8_t is_rx);
+static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
+			       struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+				 struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
+static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
+		uint16_t vlan_id, int on);
+static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+		uint16_t queue, bool on);
+static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
+		int on);
+static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
+static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
+static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
+static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+			       struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+			       struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+		struct rte_eth_pfc_conf *pfc_conf);
+static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
+static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
+static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
+		void *param);
+static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		uint32_t index, uint32_t pool);
+static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+
+/* For Virtual Function support */
+static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
+static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
+static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+		struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
+		uint16_t vlan_id, int on);
+static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
+		uint16_t queue, int on);
+static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+
+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+		ether_addr* mac_addr,uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
+		uint16_t rx_mask, uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+		uint64_t pool_mask,uint8_t vlan_on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+		struct rte_eth_vmdq_mirror_conf *mirror_conf,
+		uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+		uint8_t	rule_id);
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+		uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+		uint16_t tx_rate, uint64_t q_msk);
+
+static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+				 struct ether_addr *mac_addr,
+				 uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter,
+			bool add);
+static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter);
+static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op,
+			void *arg);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+			struct ixgbe_5tuple_filter *filter);
+static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+			struct ixgbe_5tuple_filter *filter);
+static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *filter,
+			bool add);
+static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+				enum rte_filter_op filter_op,
+				void *arg);
+static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *filter);
+static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add);
+static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+				enum rte_filter_op filter_op,
+				void *arg);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter);
+static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg);
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+
+/*
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
+#define UPDATE_VF_STAT(reg, last, cur)	                        \
+{                                                               \
+	u32 latest = IXGBE_READ_REG(hw, reg);                   \
+	cur += latest - last;                                   \
+	last = latest;                                          \
+}
+
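+/*
+ * The extended stats counters are 36 bits wide, split across two registers.
+ * Adding 2^36 before masking with 0xFFFFFFFFF keeps the computed delta
+ * correct when the hardware counter wraps between reads.
+ */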
+#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
+{                                                                \
+	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
+	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
+	u64 latest = ((new_msb << 32) | new_lsb);                \
+	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+	last = latest;                                           \
+}
+
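+/*
+ * Helpers for the per-queue VLAN hw-strip bitmap: the queue number selects
+ * a word of the bitmap and a bit within that word (NBBY bits per byte).
+ */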
+#define IXGBE_SET_HWSTRIP(h, q) do{\
+		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+		(h)->bitmap[idx] |= 1 << bit;\
+	}while(0)
+
+#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
+		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+		(h)->bitmap[idx] &= ~(1 << bit);\
+	}while(0)
+
+#define IXGBE_GET_HWSTRIP(h, q, r) do{\
+		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+		(r) = (h)->bitmap[idx] >> bit & 1;\
+	}while(0)
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_ixgbe_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+
+/*
+ * The set of PCI devices this driver supports (for 82599 VF)
+ */
+static const struct rte_pci_id pci_id_ixgbevf_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+{ .vendor_id = 0, /* sentinel */ },
+
+};
+
+static const struct eth_dev_ops ixgbe_eth_dev_ops = {
+	.dev_configure        = ixgbe_dev_configure,
+	.dev_start            = ixgbe_dev_start,
+	.dev_stop             = ixgbe_dev_stop,
+	.dev_set_link_up    = ixgbe_dev_set_link_up,
+	.dev_set_link_down  = ixgbe_dev_set_link_down,
+	.dev_close            = ixgbe_dev_close,
+	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
+	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
+	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
+	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
+	.link_update          = ixgbe_dev_link_update,
+	.stats_get            = ixgbe_dev_stats_get,
+	.stats_reset          = ixgbe_dev_stats_reset,
+	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+	.dev_infos_get        = ixgbe_dev_info_get,
+	.mtu_set              = ixgbe_dev_mtu_set,
+	.vlan_filter_set      = ixgbe_vlan_filter_set,
+	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
+	.vlan_offload_set     = ixgbe_vlan_offload_set,
+	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
+	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
+	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
+	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
+	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+	.rx_queue_release     = ixgbe_dev_rx_queue_release,
+	.rx_queue_count       = ixgbe_dev_rx_queue_count,
+	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
+	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
+	.tx_queue_release     = ixgbe_dev_tx_queue_release,
+	.dev_led_on           = ixgbe_dev_led_on,
+	.dev_led_off          = ixgbe_dev_led_off,
+	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
+	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
+	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
+	.mac_addr_add         = ixgbe_add_rar,
+	.mac_addr_remove      = ixgbe_remove_rar,
+	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
+	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
+	.mirror_rule_set      = ixgbe_mirror_rule_set,
+	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
+	.set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
+	.set_vf_rx            = ixgbe_set_pool_rx,
+	.set_vf_tx            = ixgbe_set_pool_tx,
+	.set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+	.set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
+	.reta_update          = ixgbe_dev_rss_reta_update,
+	.reta_query           = ixgbe_dev_rss_reta_query,
+#ifdef RTE_NIC_BYPASS
+	.bypass_init          = ixgbe_bypass_init,
+	.bypass_state_set     = ixgbe_bypass_state_store,
+	.bypass_state_show    = ixgbe_bypass_state_show,
+	.bypass_event_set     = ixgbe_bypass_event_store,
+	.bypass_event_show    = ixgbe_bypass_event_show,
+	.bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
+	.bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
+	.bypass_ver_show      = ixgbe_bypass_ver_show,
+	.bypass_wd_reset      = ixgbe_bypass_wd_reset,
+#endif /* RTE_NIC_BYPASS */
+	.rss_hash_update      = ixgbe_dev_rss_hash_update,
+	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+	.filter_ctrl          = ixgbe_dev_filter_ctrl,
+};
+
+/*
+ * dev_ops for virtual function; only the bare necessities for basic VF
+ * operation have been implemented
+ */
+static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
+	.dev_configure        = ixgbevf_dev_configure,
+	.dev_start            = ixgbevf_dev_start,
+	.dev_stop             = ixgbevf_dev_stop,
+	.link_update          = ixgbe_dev_link_update,
+	.stats_get            = ixgbevf_dev_stats_get,
+	.stats_reset          = ixgbevf_dev_stats_reset,
+	.dev_close            = ixgbevf_dev_close,
+	.dev_infos_get        = ixgbevf_dev_info_get,
+	.mtu_set              = ixgbevf_dev_set_mtu,
+	.vlan_filter_set      = ixgbevf_vlan_filter_set,
+	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
+	.vlan_offload_set     = ixgbevf_vlan_offload_set,
+	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+	.rx_queue_release     = ixgbe_dev_rx_queue_release,
+	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
+	.tx_queue_release     = ixgbe_dev_tx_queue_release,
+	.mac_addr_add         = ixgbevf_add_mac_addr,
+	.mac_addr_remove      = ixgbevf_remove_mac_addr,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+				struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = link;
+	struct rte_eth_link *src = &(dev->data->dev_link);
+
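+	/*
+	 * rte_atomic64_cmpset() is used here as an atomic 64-bit copy of the
+	 * link information (struct rte_eth_link is accessed as a uint64_t).
+	 */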
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+					*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to write to.
+ *   - Pointer to the buffer holding the link status to write.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+				struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = &(dev->data->dev_link);
+	struct rte_eth_link *src = link;
+
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+					*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
+ */
+static inline int
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+	switch (hw->phy.type) {
+	case ixgbe_phy_sfp_avago:
+	case ixgbe_phy_sfp_ftl:
+	case ixgbe_phy_sfp_intel:
+	case ixgbe_phy_sfp_unknown:
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+	uint32_t ctrl_ext;
+	int32_t status;
+
+	status = ixgbe_reset_hw(hw);
+
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return status;
+}
+
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
+ */
+static void
+ixgbe_disable_intr(struct ixgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
+	}
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function resets queue statistics mapping registers.
+ * From Niantic datasheet, Initialization of Statistics section:
+ * "...if software requires the queue counters, the RQSMR and TQSM registers
+ * must be re-programmed following a device reset."
+ */
+static void
+ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
+{
+	uint32_t i;
+
+	for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
+	}
+}
+
+
+static int
+ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_id,
+				  uint8_t stat_idx,
+				  uint8_t is_rx)
+{
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
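+/*
+ * Each RQSMR/TQSM register packs four 8-bit queue-to-stat-index mappings,
+ * so queue_id selects both a register (n) and a byte offset within it.
+ */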
+
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct ixgbe_stat_mapping_registers *stat_mappings =
+		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
+	uint32_t qsmr_mask = 0;
+	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+	uint32_t q_map;
+	uint8_t n, offset;
+
+	if ((hw->mac.type != ixgbe_mac_82599EB) &&
+		(hw->mac.type != ixgbe_mac_X540) &&
+		(hw->mac.type != ixgbe_mac_X550) &&
+		(hw->mac.type != ixgbe_mac_X550EM_x))
+		return -ENOSYS;
+
+	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+		return -EIO;
+	}
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+	/* Now clear any previous stat_idx set */
+	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] &= ~clearing_mask;
+	else
+		stat_mappings->rqsmr[n] &= ~clearing_mask;
+
+	q_map = (uint32_t)stat_idx;
+	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] |= qsmr_mask;
+	else
+		stat_mappings->rqsmr[n] |= qsmr_mask;
+
+	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+
+	/* Now write the mapping in the appropriate register */
+	if (is_rx) {
+		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+			     stat_mappings->rqsmr[n], n);
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
+	}
+	else {
+		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+			     stat_mappings->tqsm[n], n);
+		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
+	}
+	return 0;
+}
+
+static void
+ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+{
+	struct ixgbe_stat_mapping_registers *stat_mappings =
+		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i;
+
+	/* write whatever was in stat mapping table to the NIC */
+	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
+		/* rx */
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
+
+		/* tx */
+		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
+	}
+}
+
+static void
+ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+{
+	uint8_t i;
+	struct ixgbe_dcb_tc_config *tc;
+	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
+	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
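+	/* Split 100% of bandwidth across the TCs; the (i & 1) term hands the
+	 * rounding remainder to every other TC so the shares sum to 100. */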
+	for (i = 0; i < dcb_max_tc; i++) {
+		tc = &dcb_config->tc_config[i];
+		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
+		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+				 (uint8_t)(100/dcb_max_tc + (i & 1));
+		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
+		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+				 (uint8_t)(100/dcb_max_tc + (i & 1));
+		tc->pfc = ixgbe_dcb_pfc_disabled;
+	}
+
+	/* Initialize default user to priority mapping, UPx->TC0 */
+	tc = &dcb_config->tc_config[0];
+	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+	for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
+		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
+	}
+	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
+	dcb_config->pfc_mode_enable = false;
+	dcb_config->vt_mode = true;
+	dcb_config->round_robin_enable = false;
+	/* support all DCB capabilities in 82599 */
+	dcb_config->support.capabilities = 0xFF;
+
+	/* we only support 4 TCs for X540, X550 */
+	if (hw->mac.type == ixgbe_mac_X540 ||
+		hw->mac.type == ixgbe_mac_X550 ||
+		hw->mac.type == ixgbe_mac_X550EM_x) {
+		dcb_config->num_tcs.pg_tcs = 4;
+		dcb_config->num_tcs.pfc_tcs = 4;
+	}
+}
+
+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+	uint16_t mask;
+
+	/*
+	 * The PHY lock should not fail at this early stage. If it does, it is
+	 * due to an improper exit of the application.
+	 * So force the release of the faulty lock. Release of the common lock
+	 * is done automatically by the swfw_sync function.
+	 */
+	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
+	}
+	ixgbe_release_swfw_semaphore(hw, mask);
+
+	/*
+	 * These ones are trickier since they are common to all ports; but
+	 * swfw_sync retries last long enough (1s) to be almost sure that if
+	 * the lock cannot be taken, it is due to an improper lock of the
+	 * semaphore.
+	 */
+	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+	}
+	ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+/*
+ * This function is based on code in ixgbe_attach() in base/ixgbe.c.
+ * It returns 0 on success.
+ */
+static int
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+	struct ixgbe_hwstrip *hwstrip =
+		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+	struct ixgbe_dcb_config *dcb_config =
+		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	uint32_t ctrl_ext;
+	uint16_t csum;
+	int diag, i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+	/*
+	 * For secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX and TX function.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+		struct ixgbe_tx_queue *txq;
+		/* TX queue function in primary, set by last queue initialized;
+		 * TX queues may not have been initialized by the primary process */
+		if (eth_dev->data->tx_queues) {
+			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+			ixgbe_set_tx_function(eth_dev, txq);
+		} else {
+			/* Use default TX function if we get here */
+			PMD_INIT_LOG(INFO, "No TX queues configured yet. "
+			                   "Using default TX function.");
+		}
+
+		ixgbe_set_rx_function(eth_dev);
+
+		return 0;
+	}
+	pci_dev = eth_dev->pci_dev;
+
+	/* Vendor and Device ID need to be set before init of shared code */
+	hw->device_id = pci_dev->id.device_id;
+	hw->vendor_id = pci_dev->id.vendor_id;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+	hw->allow_unsupported_sfp = 1;
+
+	/* Initialize the shared code (base driver) */
+#ifdef RTE_NIC_BYPASS
+	diag = ixgbe_bypass_init_shared_code(hw);
+#else
+	diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_NIC_BYPASS */
+
+	if (diag != IXGBE_SUCCESS) {
+		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+		return -EIO;
+	}
+
+	/* pick up the PCI bus settings for reporting later */
+	ixgbe_get_bus_info(hw);
+
+	/* Unlock any pending hardware semaphore */
+	ixgbe_swfw_lock_reset(hw);
+
+	/* Initialize DCB configuration*/
+	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+	ixgbe_dcb_init(hw,dcb_config);
+	/* Get Hardware Flow Control setting */
+	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.current_mode = ixgbe_fc_full;
+	hw->fc.pause_time = IXGBE_FC_PAUSE;
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.low_water[i] = IXGBE_FC_LO;
+		hw->fc.high_water[i] = IXGBE_FC_HI;
+	}
+	hw->fc.send_xon = 1;
+
+	/* Make sure we have a good EEPROM before we read from it */
+	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+	if (diag != IXGBE_SUCCESS) {
+		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
+		return -EIO;
+	}
+
+#ifdef RTE_NIC_BYPASS
+	diag = ixgbe_bypass_init_hw(hw);
+#else
+	diag = ixgbe_init_hw(hw);
+#endif /* RTE_NIC_BYPASS */
+
+	/*
+	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
+	 * is called too soon after the kernel driver unbinding/binding occurs.
+	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
+	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
+	 * also called. See ixgbe_identify_phy_82599(). The reason for the
+	 * failure is not known, and it only occurs when virtualisation features
+	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
+	 * trial-and-error, and is doubled to be safe.
+	 */
+	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+		rte_delay_ms(200);
+		diag = ixgbe_init_hw(hw);
+	}
+
+	if (diag == IXGBE_ERR_EEPROM_VERSION) {
+		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
+		    "LOM.  Please be aware there may be issues associated "
+		    "with your hardware.");
+		PMD_INIT_LOG(ERR, "If you are experiencing problems "
+		    "please contact your Intel or hardware representative "
+		    "who provided you with this hardware.");
+	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
+	if (diag) {
+		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
+		return -EIO;
+	}
+
+	/* disable interrupt */
+	ixgbe_disable_intr(hw);
+
+	/* reset mappings for queue statistics hw counters*/
+	ixgbe_reset_qstat_mappings(hw);
+
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+			hw->mac.num_rar_entries, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR,
+			"Failed to allocate %u bytes needed to store "
+			"MAC addresses",
+			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+		return -ENOMEM;
+	}
+	/* Copy the permanent MAC address */
+	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+			&eth_dev->data->mac_addrs[0]);
+
+	/* Allocate memory for storing hash filter MAC addresses */
+	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+			IXGBE_VMDQ_NUM_UC_MAC, 0);
+	if (eth_dev->data->hash_mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR,
+			"Failed to allocate %d bytes needed to store MAC addresses",
+			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+		return -ENOMEM;
+	}
+
+	/* initialize the vfta */
+	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+	/* initialize the hw strip bitmap*/
+	memset(hwstrip, 0, sizeof(*hwstrip));
+
+	/* initialize PF if max_vfs not zero */
+	ixgbe_pf_host_init(eth_dev);
+
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	/* let hardware know driver is loaded */
+	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
+			     (int) hw->mac.type, (int) hw->phy.type,
+			     (int) hw->phy.sfp_type);
+	else
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
+			     (int) hw->mac.type, (int) hw->phy.type);
+
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+			eth_dev->data->port_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id);
+
+	rte_intr_callback_register(&(pci_dev->intr_handle),
+		ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+	/* enable uio intr after callback register */
+	rte_intr_enable(&(pci_dev->intr_handle));
+
+	/* enable support intr */
+	ixgbe_enable_intr(eth_dev);
+
+	/* initialize 5tuple filter list */
+	TAILQ_INIT(&filter_info->fivetuple_list);
+	memset(filter_info->fivetuple_mask, 0,
+		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+	return 0;
+}
+
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After a reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+	int32_t i;
+
+	/* start with highest supported, proceed down */
+	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+		ixgbe_mbox_api_11,
+		ixgbe_mbox_api_10,
+	};
+
+	for (i = 0;
+			i != RTE_DIM(sup_ver) &&
+			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+			i++)
+		;
+}
+
+static void
+generate_random_mac_addr(struct ether_addr *mac_addr)
+{
+	uint64_t random;
+
+	/* Set Organizationally Unique Identifier (OUI) prefix. */
+	mac_addr->addr_bytes[0] = 0x00;
+	mac_addr->addr_bytes[1] = 0x09;
+	mac_addr->addr_bytes[2] = 0xC0;
+	/* Force indication of locally assigned MAC address. */
+	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+	/* Generate the last 3 bytes of the MAC address with a random number. */
+	random = rte_rand();
+	memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+	int diag;
+	uint32_t tc, tcs;
+	struct rte_pci_device *pci_dev;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+	struct ixgbe_hwstrip *hwstrip =
+		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+
+	PMD_INIT_FUNC_TRACE();
+
+	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+		if (eth_dev->data->scattered_rx)
+			eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+		return 0;
+	}
+
+	pci_dev = eth_dev->pci_dev;
+
+	hw->device_id = pci_dev->id.device_id;
+	hw->vendor_id = pci_dev->id.vendor_id;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+	/* initialize the vfta */
+	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+	/* initialize the hw strip bitmap*/
+	memset(hwstrip, 0, sizeof(*hwstrip));
+
+	/* Initialize the shared code (base driver) */
+	diag = ixgbe_init_shared_code(hw);
+	if (diag != IXGBE_SUCCESS) {
+		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
+		return -EIO;
+	}
+
+	/* init_mailbox_params */
+	hw->mbx.ops.init_params(hw);
+
+	/* Disable the interrupts for VF */
+	ixgbevf_intr_disable(hw);
+
+	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
+	diag = hw->mac.ops.reset_hw(hw);
+
+	/*
+	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
+	 * the underlying PF driver has not assigned a MAC address to the VF.
+	 * In this case, assign a random MAC address.
+	 */
+	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
+		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+		return (diag);
+	}
+
+	/* negotiate mailbox API version to use with the PF. */
+	ixgbevf_negotiate_api(hw);
+
+	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+	ixgbevf_get_queues(hw, &tcs, &tc);
+
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+			hw->mac.num_rar_entries, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR,
+			"Failed to allocate %u bytes needed to store "
+			"MAC addresses",
+			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+		return -ENOMEM;
+	}
+
+	/* Generate a random MAC address, if none was assigned by PF. */
+	if (is_zero_ether_addr(perm_addr)) {
+		generate_random_mac_addr(perm_addr);
+		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+		if (diag) {
+			rte_free(eth_dev->data->mac_addrs);
+			eth_dev->data->mac_addrs = NULL;
+			return diag;
+		}
+		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+			     "%02x:%02x:%02x:%02x:%02x:%02x",
+			     perm_addr->addr_bytes[0],
+			     perm_addr->addr_bytes[1],
+			     perm_addr->addr_bytes[2],
+			     perm_addr->addr_bytes[3],
+			     perm_addr->addr_bytes[4],
+			     perm_addr->addr_bytes[5]);
+	}
+
+	/* Copy the permanent MAC address */
+	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+
+	/* reset the hardware with the new settings */
+	diag = hw->mac.ops.start_hw(hw);
+	switch (diag) {
+		case  0:
+			break;
+
+		default:
+			PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+			return (-EIO);
+	}
+
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
+
+	return 0;
+}
+
+static struct eth_driver rte_ixgbe_pmd = {
+	{
+		.name = "rte_ixgbe_pmd",
+		.id_table = pci_id_ixgbe_map,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	},
+	.eth_dev_init = eth_ixgbe_dev_init,
+	.dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * virtual function driver struct
+ */
+static struct eth_driver rte_ixgbevf_pmd = {
+	{
+		.name = "rte_ixgbevf_pmd",
+		.id_table = pci_id_ixgbevf_map,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	},
+	.eth_dev_init = eth_ixgbevf_dev_init,
+	.dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
+ */
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	rte_eth_driver_register(&rte_ixgbe_pmd);
+	return 0;
+}
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
+ */
+static int
+rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	rte_eth_driver_register(&rte_ixgbevf_pmd);
+	return (0);
+}
+
+static int
+ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+	uint32_t vfta;
+	uint32_t vid_idx;
+	uint32_t vid_bit;
+
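+	/* The VFTA is an array of 128 32-bit registers (hence the 0x7F mask):
+	 * bits [11:5] of the VLAN ID select the register and bits [4:0] select
+	 * the bit within it, e.g. VLAN 100 maps to VFTA[3], bit 4. */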
+	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
+	if (on)
+		vfta |= vid_bit;
+	else
+		vfta &= ~vid_bit;
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
+
+	/* update local VFTA copy */
+	shadow_vfta->vfta[vid_idx] = vfta;
+
+	return 0;
+}
+
+static void
+ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+	if (on)
+		ixgbe_vlan_hw_strip_enable(dev, queue);
+	else
+		ixgbe_vlan_hw_strip_disable(dev, queue);
+}
+
+static void
+ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Only the high 16 bits are valid */
+	IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+}
+
+void
+ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vlnctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Filter Table Disable */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+void
+ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+	uint32_t vlnctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Filter Table Enable */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+	/* write whatever is in local vfta copy */
+	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
+}
+
+static void
+ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+	struct ixgbe_hwstrip *hwstrip =
+		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+
+	if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+		return;
+
+	if (on)
+		IXGBE_SET_HWSTRIP(hwstrip, queue);
+	else
+		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+}
+
+static void
+ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		/* No queue level support */
+		PMD_INIT_LOG(INFO, "82598EB does not support queue level hw strip");
+		return;
+	}
+	else {
+		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+		ctrl &= ~IXGBE_RXDCTL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+	}
+	/* record this setting in the per-queue HW strip bitmap */
+	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		/* No queue level supported */
+		PMD_INIT_LOG(INFO, "82598EB does not support queue level hw strip");
+		return;
+	}
+	else {
+		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+		ctrl |= IXGBE_RXDCTL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+	}
+	/* record this setting in the per-queue HW strip bitmap */
+	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+void
+ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+		ctrl &= ~IXGBE_VLNCTRL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+	}
+	else {
+		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			ctrl &= ~IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+
+			/* record this setting in the per-queue HW strip bitmap */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
+		}
+	}
+}
+
+void
+ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+		ctrl |= IXGBE_VLNCTRL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+	}
+	else {
+		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			ctrl |= IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+
+			/* record this setting in the per-queue HW strip bitmap */
+			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
+		}
+	}
+}
+
+static void
+ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* DMATXCTRL: Generic Double VLAN Disable */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	ctrl &= ~IXGBE_DMATXCTL_GDV;
+	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+	/* CTRL_EXT: Global Double VLAN Disable */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl &= ~IXGBE_EXTENDED_VLAN;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+}
+
+static void
+ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* DMATXCTRL: Generic Double VLAN Enable */
+	ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	ctrl |= IXGBE_DMATXCTL_GDV;
+	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
+
+	/* CTRL_EXT: Global Double VLAN Enable */
+	ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl |= IXGBE_EXTENDED_VLAN;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
+
+	/*
+	 * The VET EXT field in the EXVET register defaults to 0x8100,
+	 * so there is no need to change it. The same applies to the VT
+	 * field of the DMATXCTL register.
+	 */
+}
+
+static void
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	if(mask & ETH_VLAN_STRIP_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+			ixgbe_vlan_hw_strip_enable_all(dev);
+		else
+			ixgbe_vlan_hw_strip_disable_all(dev);
+	}
+
+	if(mask & ETH_VLAN_FILTER_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+			ixgbe_vlan_hw_filter_enable(dev);
+		else
+			ixgbe_vlan_hw_filter_disable(dev);
+	}
+
+	if(mask & ETH_VLAN_EXTEND_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+			ixgbe_vlan_hw_extend_enable(dev);
+		else
+			ixgbe_vlan_hw_extend_disable(dev);
+	}
+}
+
+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
+static int
+ixgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* set flag to update link status after init */
+	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+	/*
+	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
+	 * allocation or vector Rx preconditions, this will be reset.
+	 */
+	adapter->rx_bulk_alloc_allowed = true;
+	adapter->rx_vec_allowed = true;
+
+	return 0;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+ixgbe_dev_start(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	int err, link_up = 0, negotiate = 0;
+	uint32_t speed = 0;
+	int mask = 0;
+	int status;
+	uint16_t vf, idx;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* IXGBE devices don't support half duplex */
+	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+			     dev->data->dev_conf.link_duplex,
+			     dev->data->port_id);
+		return -EINVAL;
+	}
+
+	/* stop adapter */
+	hw->adapter_stopped = FALSE;
+	ixgbe_stop_adapter(hw);
+
+	/* reinitialize the adapter;
+	 * this calls reset and start */
+	status = ixgbe_pf_reset_hw(hw);
+	if (status != 0)
+		return -1;
+	hw->mac.ops.start_hw(hw);
+	hw->mac.get_link_status = true;
+
+	/* configure PF module if SRIOV enabled */
+	ixgbe_pf_host_configure(dev);
+
+	/* initialize transmission unit */
+	ixgbe_dev_tx_init(dev);
+
+	/* This can fail when allocating mbufs for descriptor rings */
+	err = ixgbe_dev_rx_init(dev);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+		goto error;
+	}
+
+	err = ixgbe_dev_rxtx_start(dev);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+		goto error;
+	}
+
+	/* Skip link setup if loopback mode is enabled for 82599. */
+	if (hw->mac.type == ixgbe_mac_82599EB &&
+			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+		goto skip_link_setup;
+
+	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+		err = hw->mac.ops.setup_sfp(hw);
+		if (err)
+			goto error;
+	}
+
+	/* Turn on the laser */
+	ixgbe_enable_tx_laser(hw);
+
+	err = ixgbe_check_link(hw, &speed, &link_up, 0);
+	if (err)
+		goto error;
+	dev->data->dev_link.link_status = link_up;
+
+	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
+	if (err)
+		goto error;
+
+	switch(dev->data->dev_conf.link_speed) {
+	case ETH_LINK_SPEED_AUTONEG:
+		speed = (hw->mac.type != ixgbe_mac_82598EB) ?
+				IXGBE_LINK_SPEED_82599_AUTONEG :
+				IXGBE_LINK_SPEED_82598_AUTONEG;
+		break;
+	case ETH_LINK_SPEED_100:
+		/*
+		 * Invalid for 82598 but error will be detected by
+		 * ixgbe_setup_link()
+		 */
+		speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	case ETH_LINK_SPEED_1000:
+		speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case ETH_LINK_SPEED_10000:
+		speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+			     dev->data->dev_conf.link_speed,
+			     dev->data->port_id);
+		goto error;
+	}
+
+	err = ixgbe_setup_link(hw, speed, link_up);
+	if (err)
+		goto error;
+
+skip_link_setup:
+
+	/* check if lsc interrupt is enabled */
+	if (dev->data->dev_conf.intr_conf.lsc != 0)
+		ixgbe_dev_lsc_interrupt_setup(dev);
+
+	/* resume enabled intr since hw reset */
+	ixgbe_enable_intr(dev);
+
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+		ETH_VLAN_EXTEND_MASK;
+	ixgbe_vlan_offload_set(dev, mask);
+
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable vlan filtering for VMDq */
+		ixgbe_vmdq_vlan_hw_filter_enable(dev);
+	}
+
+	/* Configure DCB hw */
+	ixgbe_configure_dcb(dev);
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+		err = ixgbe_fdir_configure(dev);
+		if (err)
+			goto error;
+	}
+
+	/* Restore vf rate limit */
+	if (vfinfo != NULL) {
+		for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+				if (vfinfo[vf].tx_rate[idx] != 0)
+					ixgbe_set_vf_rate_limit(dev, vf,
+						vfinfo[vf].tx_rate[idx],
+						1 << idx);
+	}
+
+	ixgbe_restore_statistics_mapping(dev);
+
+	return (0);
+
+error:
+	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
+	ixgbe_dev_clear_queues(dev);
+	return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+ixgbe_dev_stop(struct rte_eth_dev *dev)
+{
+	struct rte_eth_link link;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+	int vf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* disable interrupts */
+	ixgbe_disable_intr(hw);
+
+	/* reset the NIC */
+	ixgbe_pf_reset_hw(hw);
+	hw->adapter_stopped = FALSE;
+
+	/* stop adapter */
+	ixgbe_stop_adapter(hw);
+
+	for (vf = 0; vfinfo != NULL &&
+		     vf < dev->pci_dev->max_vfs; vf++)
+		vfinfo[vf].clear_to_send = false;
+
+	/* Turn off the laser */
+	ixgbe_disable_tx_laser(hw);
+
+	ixgbe_dev_clear_queues(dev);
+
+	/* Clear stored conf */
+	dev->data->scattered_rx = 0;
+	dev->data->lro = 0;
+
+	/* Clear recorded link status */
+	memset(&link, 0, sizeof(link));
+	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+	/* Remove all ntuple filters of the device */
+	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
+	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
+		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple, entries);
+		rte_free(p_5tuple);
+	}
+	memset(filter_info->fivetuple_mask, 0,
+		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+}
+
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+			/* Not supported in bypass mode */
+			PMD_INIT_LOG(ERR, "Set link up is not supported "
+				     "by device id 0x%x", hw->device_id);
+			return -ENOTSUP;
+		}
+#endif
+		/* Turn on the laser */
+		ixgbe_enable_tx_laser(hw);
+		return 0;
+	}
+
+	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+		     hw->device_id);
+	return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+			/* Not supported in bypass mode */
+			PMD_INIT_LOG(ERR, "Set link down is not supported "
+				     "by device id 0x%x", hw->device_id);
+			return -ENOTSUP;
+		}
+#endif
+		/* Turn off the laser */
+		ixgbe_disable_tx_laser(hw);
+		return 0;
+	}
+
+	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+		     hw->device_id);
+	return -ENOTSUP;
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+ixgbe_dev_close(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	ixgbe_pf_reset_hw(hw);
+
+	ixgbe_dev_stop(dev);
+	hw->adapter_stopped = 1;
+
+	ixgbe_disable_pcie_master(hw);
+
+	/* reprogram the RAR[0] in case user changed it. */
+	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in base/ixgbe.c
+ */
+static void
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ixgbe_hw *hw =
+			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_stats *hw_stats =
+			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	uint32_t bprc, lxon, lxoff, total;
+	uint64_t total_missed_rx, total_qbrc, total_qprc;
+	unsigned i;
+
+	total_missed_rx = 0;
+	total_qbrc = 0;
+	total_qprc = 0;
+
+	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+	for (i = 0; i < 8; i++) {
+		uint32_t mp;
+		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+		/* global total per queue */
+		hw_stats->mpc[i] += mp;
+		/* Running comprehensive total for stats display */
+		total_missed_rx += hw_stats->mpc[i];
+		if (hw->mac.type == ixgbe_mac_82598EB)
+			hw_stats->rnbc[i] +=
+			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+		hw_stats->pxontxc[i] +=
+		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+		hw_stats->pxonrxc[i] +=
+		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+		hw_stats->pxofftxc[i] +=
+		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+		hw_stats->pxoffrxc[i] +=
+		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+		hw_stats->pxon2offc[i] +=
+		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+	}
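+	/* The per-queue byte counters are 64-bit values split across _L/_H
+	 * register pairs; the low half is read first, then the high half is
+	 * folded in shifted up by 32 bits. */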
+	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+		hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+		hw_stats->qbrc[i] +=
+		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+		hw_stats->qbtc[i] +=
+		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
+		hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+		total_qprc += hw_stats->qprc[i];
+		total_qbrc += hw_stats->qbrc[i];
+	}
+	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+	/* Note that gprc counts missed packets */
+	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+	} else {
+		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+		/* 82598 only has a counter in the high register */
+		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+	}
+
+	/*
+	 * Workaround: mprc hardware is incorrectly counting
+	 * broadcasts, so for now we subtract those.
+	 */
+	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+	hw_stats->bprc += bprc;
+	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		hw_stats->mprc -= bprc;
+
+	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	hw_stats->lxontxc += lxon;
+	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	hw_stats->lxofftxc += lxoff;
+	total = lxon + lxoff;
+
+	hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+	hw_stats->gptc -= total;
+	hw_stats->mptc -= total;
+	hw_stats->ptc64 -= total;
+	hw_stats->gotc -= total * ETHER_MIN_LEN;
+
+	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+	/* Only read FCOE on 82599 */
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+	}
+
+	if (stats == NULL)
+		return;
+
+	/* Fill out the rte_eth_stats statistics structure */
+	stats->ipackets = total_qprc;
+	stats->ibytes = total_qbrc;
+	stats->opackets = hw_stats->gptc;
+	stats->obytes = hw_stats->gotc;
+	stats->imcasts = hw_stats->mprc;
+
+	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+		stats->q_ipackets[i] = hw_stats->qprc[i];
+		stats->q_opackets[i] = hw_stats->qptc[i];
+		stats->q_ibytes[i] = hw_stats->qbrc[i];
+		stats->q_obytes[i] = hw_stats->qbtc[i];
+		stats->q_errors[i] = hw_stats->qprdc[i];
+	}
+
+	/* Rx Errors */
+	stats->ibadcrc  = hw_stats->crcerrs;
+	stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+	stats->imissed  = total_missed_rx;
+	stats->ierrors  = stats->ibadcrc +
+	                  stats->ibadlen +
+	                  stats->imissed +
+	                  hw_stats->illerrc + hw_stats->errbc;
+
+	/* Tx Errors */
+	stats->oerrors  = 0;
+
+	/* XON/XOFF pause frames */
+	stats->tx_pause_xon  = hw_stats->lxontxc;
+	stats->rx_pause_xon  = hw_stats->lxonrxc;
+	stats->tx_pause_xoff = hw_stats->lxofftxc;
+	stats->rx_pause_xoff = hw_stats->lxoffrxc;
+
+	/* Flow Director Stats registers */
+	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+	stats->fdirmatch = hw_stats->fdirmatch;
+	stats->fdirmiss = hw_stats->fdirmiss;
+}
+
+static void
+ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw_stats *stats =
+			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+	/* HW registers are cleared on read */
+	ixgbe_dev_stats_get(dev, NULL);
+
+	/* Reset software totals */
+	memset(stats, 0, sizeof(*stats));
+}
+
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
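+	/* The UPDATE_VF_STAT* macros accumulate the change in each hardware
+	 * counter since the previous read (handling counter wrap) into the
+	 * driver's running totals. */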
+	/* Good Rx packet, include VF loopback */
+	UPDATE_VF_STAT(IXGBE_VFGPRC,
+	    hw_stats->last_vfgprc, hw_stats->vfgprc);
+
+	/* Good Rx octets, include VF loopback */
+	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+	    hw_stats->last_vfgorc, hw_stats->vfgorc);
+
+	/* Good Tx packet, include VF loopback */
+	UPDATE_VF_STAT(IXGBE_VFGPTC,
+	    hw_stats->last_vfgptc, hw_stats->vfgptc);
+
+	/* Good Tx octets, include VF loopback */
+	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+	    hw_stats->last_vfgotc, hw_stats->vfgotc);
+
+	/* Rx Multicast Packet */
+	UPDATE_VF_STAT(IXGBE_VFMPRC,
+	    hw_stats->last_vfmprc, hw_stats->vfmprc);
+
+	if (stats == NULL)
+		return;
+
+	stats->ipackets = hw_stats->vfgprc;
+	stats->ibytes = hw_stats->vfgorc;
+	stats->opackets = hw_stats->vfgptc;
+	stats->obytes = hw_stats->vfgotc;
+	stats->imcasts = hw_stats->vfmprc;
+}
+
+static void
+ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+	/* Sync HW register to the last stats */
+	ixgbevf_dev_stats_get(dev, NULL);
+
+	/* reset HW current stats */
+	hw_stats->vfgprc = 0;
+	hw_stats->vfgorc = 0;
+	hw_stats->vfgptc = 0;
+	hw_stats->vfgotc = 0;
+	hw_stats->vfmprc = 0;
+
+}
+
+static void
+ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
+	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
+	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+	dev_info->max_vfs = dev->pci_dev->max_vfs;
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		dev_info->max_vmdq_pools = ETH_16_POOLS;
+	else
+		dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM  |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	/*
+	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB ||
+	     hw->mac.type == ixgbe_mac_X540) &&
+	    !RTE_ETH_DEV_SRIOV(dev).active)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+				ETH_TXQ_FLAGS_NOOFFLOADS,
+	};
+	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
+}
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+		     struct rte_eth_dev_info *dev_info)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+	dev_info->max_vfs = dev->pci_dev->max_vfs;
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		dev_info->max_vmdq_pools = ETH_16_POOLS;
+	else
+		dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+				DEV_RX_OFFLOAD_IPV4_CKSUM |
+				DEV_RX_OFFLOAD_UDP_CKSUM  |
+				DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+				DEV_TX_OFFLOAD_IPV4_CKSUM  |
+				DEV_TX_OFFLOAD_UDP_CKSUM   |
+				DEV_TX_OFFLOAD_TCP_CKSUM   |
+				DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+				ETH_TXQ_FLAGS_NOOFFLOADS,
+	};
+}
+
+/* return 0 if the link status changed, -1 if it did not change */
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_link link, old;
+	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+	int link_up;
+	int diag;
+
+	link.link_status = 0;
+	link.link_speed = 0;
+	link.link_duplex = 0;
+	memset(&old, 0, sizeof(old));
+	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+
+	/* do not wait for completion if the caller did not ask to, or if the lsc interrupt is enabled */
+	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+	else
+		diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+	if (diag != 0) {
+		link.link_speed = ETH_LINK_SPEED_100;
+		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+		if (link.link_status == old.link_status)
+			return -1;
+		return 0;
+	}
+
+	if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
+	    !hw->mac.get_link_status) {
+		memcpy(&link, &old, sizeof(link));
+		return -1;
+	}
+
+	if (link_up == 0) {
+		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+		if (link.link_status == old.link_status)
+			return -1;
+		return 0;
+	}
+	link.link_status = 1;
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+	switch (link_speed) {
+	default:
+	case IXGBE_LINK_SPEED_UNKNOWN:
+		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_speed = ETH_LINK_SPEED_100;
+		break;
+
+	case IXGBE_LINK_SPEED_100_FULL:
+		link.link_speed = ETH_LINK_SPEED_100;
+		break;
+
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		link.link_speed = ETH_LINK_SPEED_1000;
+		break;
+
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		link.link_speed = ETH_LINK_SPEED_10000;
+		break;
+	}
+	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+	if (link.link_status == old.link_status)
+		return -1;
+
+	return 0;
+}
+
+static void
+ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fctrl;
+
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fctrl;
+
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl &= (~IXGBE_FCTRL_UPE);
+	if (dev->data->all_multicast == 1)
+		fctrl |= IXGBE_FCTRL_MPE;
+	else
+		fctrl &= (~IXGBE_FCTRL_MPE);
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fctrl;
+
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl |= IXGBE_FCTRL_MPE;
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+static void
+ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fctrl;
+
+	if (dev->data->promiscuous == 1)
+		return; /* must remain in all_multicast mode */
+
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl &= (~IXGBE_FCTRL_MPE);
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	ixgbe_dev_link_status_print(dev);
+	intr->mask |= IXGBE_EICR_LSC;
+
+	return 0;
+}
+
+/*
+ * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+	uint32_t eicr;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	/* clear all cause mask */
+	ixgbe_disable_intr(hw);
+
+	/* read-on-clear nic registers here */
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+	PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+	intr->flags = 0;
+	if (eicr & IXGBE_EICR_LSC) {
+		/* set flag for async link update */
+		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	}
+
+	if (eicr & IXGBE_EICR_MAILBOX)
+		intr->flags |= IXGBE_FLAG_MAILBOX;
+
+	return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+	struct rte_eth_link link;
+
+	memset(&link, 0, sizeof(link));
+	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+	if (link.link_status) {
+		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+					(int)(dev->data->port_id),
+					(unsigned)link.link_speed,
+			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+					"full-duplex" : "half-duplex");
+	} else {
+		PMD_INIT_LOG(INFO, " Port %d: Link Down",
+				(int)(dev->data->port_id));
+	}
+	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+				dev->pci_dev->addr.domain,
+				dev->pci_dev->addr.bus,
+				dev->pci_dev->addr.devid,
+				dev->pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update after knowing an interrupt occurred.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	int64_t timeout;
+	struct rte_eth_link link;
+	int intr_enable_delay = false;
+
+	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+	if (intr->flags & IXGBE_FLAG_MAILBOX) {
+		ixgbe_pf_mbx_process(dev);
+		intr->flags &= ~IXGBE_FLAG_MAILBOX;
+	}
+
+	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		/* get the link status before link update, for predicting later */
+		memset(&link, 0, sizeof(link));
+		rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+		ixgbe_dev_link_update(dev, 0);
+
+		/* the link is likely to come up */
+		if (!link.link_status)
+			/* handle it 1 sec later, waiting for it to become stable */
+			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* the link is likely to go down */
+		else
+			/* handle it 4 sec later, waiting for it to become stable */
+			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+		ixgbe_dev_link_status_print(dev);
+
+		intr_enable_delay = true;
+	}
+
+	if (intr_enable_delay) {
+		if (rte_eal_alarm_set(timeout * 1000,
+				      ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+			PMD_DRV_LOG(ERR, "Error setting alarm");
+	} else {
+		PMD_DRV_LOG(DEBUG, "enable intr immediately");
+		ixgbe_enable_intr(dev);
+		rte_intr_enable(&(dev->pci_dev->intr_handle));
+	}
+
+
+	return 0;
+}
+
+/**
+ * Interrupt handler to be registered as an alarm callback for delayed
+ * handling of a specific interrupt, waiting for a stable NIC state. Because
+ * the ixgbe interrupt state is not stable immediately after the link goes
+ * down, the handler waits 4 seconds to obtain a stable status.
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_interrupt_delayed_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t eicr;
+
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+	if (eicr & IXGBE_EICR_MAILBOX)
+		ixgbe_pf_mbx_process(dev);
+
+	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+		ixgbe_dev_link_update(dev, 0);
+		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+		ixgbe_dev_link_status_print(dev);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+	}
+
+	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+	ixgbe_enable_intr(dev);
+	rte_intr_enable(&(dev->pci_dev->intr_handle));
+}
+
+/**
+ * Interrupt handler triggered by the NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+							void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	ixgbe_dev_interrupt_get_status(dev);
+	ixgbe_dev_interrupt_action(dev);
+}
+
+static int
+ixgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+ixgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t mflcn_reg;
+	uint32_t fccfg_reg;
+	int rx_pause;
+	int tx_pause;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	fc_conf->pause_time = hw->fc.pause_time;
+	fc_conf->high_water = hw->fc.high_water[0];
+	fc_conf->low_water = hw->fc.low_water[0];
+	fc_conf->send_xon = hw->fc.send_xon;
+	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+	/*
+	 * Return rx_pause status according to actual setting of
+	 * MFLCN register.
+	 */
+	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+		rx_pause = 1;
+	else
+		rx_pause = 0;
+
+	/*
+	 * Return tx_pause status according to actual setting of
+	 * FCCFG register.
+	 */
+	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+	if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+		tx_pause = 1;
+	else
+		tx_pause = 0;
+
+	if (rx_pause && tx_pause)
+		fc_conf->mode = RTE_FC_FULL;
+	else if (rx_pause)
+		fc_conf->mode = RTE_FC_RX_PAUSE;
+	else if (tx_pause)
+		fc_conf->mode = RTE_FC_TX_PAUSE;
+	else
+		fc_conf->mode = RTE_FC_NONE;
+
+	return 0;
+}
+
+static int
+ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct ixgbe_hw *hw;
+	int err;
+	uint32_t rx_buf_size;
+	uint32_t max_high_water;
+	uint32_t mflcn;
+	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+		ixgbe_fc_none,
+		ixgbe_fc_rx_pause,
+		ixgbe_fc_tx_pause,
+		ixgbe_fc_full
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+		return -ENOTSUP;
+	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are expressed in kilobytes for ixgbe.
+	 */
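+	/* The RXPBSIZE value is effectively in bytes, so shifting right by
+	 * IXGBE_RXPBSIZE_SHIFT converts the remaining space into the kilobyte
+	 * units used for high_water/low_water. */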
+	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+	if ((fc_conf->high_water > max_high_water) ||
+		(fc_conf->high_water < fc_conf->low_water)) {
+		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+		return (-EINVAL);
+	}
+
+	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
+	hw->fc.pause_time     = fc_conf->pause_time;
+	hw->fc.high_water[0]  = fc_conf->high_water;
+	hw->fc.low_water[0]   = fc_conf->low_water;
+	hw->fc.send_xon       = fc_conf->send_xon;
+
+	err = ixgbe_fc_enable(hw);
+
+	/* Not negotiated is not an error case */
+	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+		/* check if we want to forward MAC frames - driver doesn't have native
+		 * capability to do that, so we'll write the registers ourselves */
+
+		mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+		/* set or clear MFLCN.PMCF bit depending on configuration */
+		if (fc_conf->mac_ctrl_frame_fwd != 0)
+			mflcn |= IXGBE_MFLCN_PMCF;
+		else
+			mflcn &= ~IXGBE_MFLCN_PMCF;
+
+		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+		IXGBE_WRITE_FLUSH(hw);
+
+		return 0;
+	}
+
+	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
+	return -EIO;
+}
+
+/**
+ *  ixgbe_pfc_enable_generic - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @tc_num: traffic class number
+ *  Enable flow control according to the current settings.
+ */
+static int
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+{
+	int ret_val = 0;
+	uint32_t mflcn_reg, fccfg_reg;
+	uint32_t reg;
+	uint32_t fcrtl, fcrth;
+	uint8_t i;
+	uint8_t nb_rx_en;
+
+	/* Validate the water mark configuration */
+	if (!hw->fc.pause_time) {
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/* Low water mark of zero causes XOFF floods */
+	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+		 /* High/Low water can not be 0 */
+		if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+			goto out;
+		}
+
+		if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+			goto out;
+		}
+	}
+	/* Negotiate the fc mode to use */
+	ixgbe_fc_autoneg(hw);
+
+	/* Disable any previous flow control settings */
+	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
+
+	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+	switch (hw->fc.current_mode) {
+	case ixgbe_fc_none:
+		/*
+		 * If more than one RX Priority Flow Control is enabled,
+		 * TX pause cannot be disabled.
+		 */
+		nb_rx_en = 0;
+		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+			if (reg & IXGBE_FCRTH_FCEN)
+				nb_rx_en++;
+		}
+		if (nb_rx_en > 1)
+			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		mflcn_reg |= IXGBE_MFLCN_RPFCE;
+		/*
+		 * If more than one RX Priority Flow Control is enabled,
+		 * TX pause cannot be disabled.
+		 */
+		nb_rx_en = 0;
+		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+			if (reg & IXGBE_FCRTH_FCEN)
+				nb_rx_en++;
+		}
+		if (nb_rx_en > 1)
+			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		mflcn_reg |= IXGBE_MFLCN_RPFCE;
+		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+		break;
+	default:
+		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	/* Set 802.3x based flow control settings. */
+	mflcn_reg |= IXGBE_MFLCN_DPF;
+	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
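+	/* hw->fc.high_water/low_water are kept in KB; the << 10 below converts
+	 * them to the byte-based values written to the FCRTL/FCRTH registers. */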
+	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		hw->fc.high_water[tc_num]) {
+		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
+		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
+		/*
+		 * In order to prevent Tx hangs when the internal Tx
+		 * switch is enabled we must set the high water mark
+		 * to the maximum FCRTH value.  This allows the Tx
+		 * switch to function even under heavy Rx workloads.
+		 */
+		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+	return ret_val;
+}
+
+static int
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
+
+	if(hw->mac.type != ixgbe_mac_82598EB) {
+		ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+	}
+	return ret_val;
+}
+
+static int
+ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+	int err;
+	uint32_t rx_buf_size;
+	uint32_t max_high_water;
+	uint8_t tc_num;
+	uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	struct ixgbe_hw *hw =
+                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_dcb_config *dcb_config =
+                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+		ixgbe_fc_none,
+		ixgbe_fc_rx_pause,
+		ixgbe_fc_tx_pause,
+		ixgbe_fc_full
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+	tc_num = map[pfc_conf->priority];
+	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are expressed in kilobytes for ixgbe.
+	 */
+	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+	if ((pfc_conf->fc.high_water > max_high_water) ||
+	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+		return (-EINVAL);
+	}
+
+	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
+	hw->fc.pause_time = pfc_conf->fc.pause_time;
+	hw->fc.send_xon = pfc_conf->fc.send_xon;
+	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
+	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+	err = ixgbe_dcb_pfc_enable(dev,tc_num);
+
+	/* Not negotiated is not an error case */
+	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
+		return 0;
+
+	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
+	return -EIO;
+}
+
+static int
+ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_reta_entry64 *reta_conf,
+			  uint16_t reta_size)
+{
+	uint8_t i, j, mask;
+	uint32_t reta, r;
+	uint16_t idx, shift;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number the hardware can support "
+			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+		return -EINVAL;
+	}
+
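+	/* Each 32-bit RETA register packs four 8-bit queue indices, so the
+	 * table is processed four entries at a time; when only some of the
+	 * four entries are selected by the mask, the register is read first
+	 * so the untouched entries are preserved. */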
+	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+						IXGBE_4_BIT_MASK);
+		if (!mask)
+			continue;
+		if (mask == IXGBE_4_BIT_MASK)
+			r = 0;
+		else
+			r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j))
+				reta |= reta_conf[idx].reta[shift + j] <<
+							(CHAR_BIT * j);
+			else
+				reta |= r & (IXGBE_8_BIT_MASK <<
+						(CHAR_BIT * j));
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_reta_entry64 *reta_conf,
+			 uint16_t reta_size)
+{
+	uint8_t i, j, mask;
+	uint32_t reta;
+	uint16_t idx, shift;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+	if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number the hardware can support "
+				"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+						IXGBE_4_BIT_MASK);
+		if (!mask)
+			continue;
+
+		reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+			if (mask & (0x1 << j))
+				reta_conf[idx].reta[shift + j] =
+					((reta >> (CHAR_BIT * j)) &
+						IXGBE_8_BIT_MASK);
+		}
+	}
+
+	return 0;
+}
+
+static void
+ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+				uint32_t index, uint32_t pool)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t enable_addr = 1;
+
+	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+}
+
+static void
+ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ixgbe_clear_rar(hw, index);
+}
+
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	uint32_t hlreg0;
+	uint32_t maxfrs;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev_info dev_info;
+	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	ixgbe_dev_info_get(dev, &dev_info);
+
+	/* check that mtu is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	/* refuse an MTU that would require scattered packet support when that
+	 * feature has not already been enabled. */
+	if (!dev->data->scattered_rx &&
+	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+	/* switch to jumbo mode if needed */
+	if (frame_size > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
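+	/* The maximum frame size lives in the upper 16 bits of MAXFRS;
+	 * preserve the lower bits and write the new length shifted up. */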
+	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+	maxfrs &= 0x0000FFFF;
+	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+	return 0;
+}
+
+/*
+ * Virtual Function operations
+ */
+static void
+ixgbevf_intr_disable(struct ixgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	/* Clear the interrupt mask to stop interrupts from being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+static int
+ixgbevf_dev_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+		     dev->data->port_id);
+
+	/*
+	 * The VF has no ability to enable/disable HW CRC stripping;
+	 * keep the behavior consistent with the host PF.
+	 */
+#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+	if (!conf->rxmode.hw_strip_crc) {
+		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+		conf->rxmode.hw_strip_crc = 1;
+	}
+#else
+	if (conf->rxmode.hw_strip_crc) {
+		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+		conf->rxmode.hw_strip_crc = 0;
+	}
+#endif
+
+	return 0;
+}
+
+static int
+ixgbevf_dev_start(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int err, mask = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hw->mac.ops.reset_hw(hw);
+	hw->mac.get_link_status = true;
+
+	/* negotiate mailbox API version to use with the PF. */
+	ixgbevf_negotiate_api(hw);
+
+	ixgbevf_dev_tx_init(dev);
+
+	/* This can fail when allocating mbufs for descriptor rings */
+	err = ixgbevf_dev_rx_init(dev);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
+		ixgbe_dev_clear_queues(dev);
+		return err;
+	}
+
+	/* Set vfta */
+	ixgbevf_set_vfta_all(dev,1);
+
+	/* Set HW strip */
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+		ETH_VLAN_EXTEND_MASK;
+	ixgbevf_vlan_offload_set(dev, mask);
+
+	ixgbevf_dev_rxtx_start(dev);
+
+	return 0;
+}
+
+static void
+ixgbevf_dev_stop(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	hw->adapter_stopped = TRUE;
+	ixgbe_stop_adapter(hw);
+
+	/*
+	 * Clear what we set, but keep shadow_vfta so it can be
+	 * restored after the device restarts
+	 */
+	ixgbevf_set_vfta_all(dev,0);
+
+	/* Clear stored conf */
+	dev->data->scattered_rx = 0;
+
+	ixgbe_dev_clear_queues(dev);
+}
+
+static void
+ixgbevf_dev_close(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+
+	ixgbe_reset_hw(hw);
+
+	ixgbevf_dev_stop(dev);
+
+	/* reprogram the RAR[0] in case user changed it. */
+	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+	int i = 0, j = 0, vfta = 0, mask = 1;
+
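+	/* Walk the shadow VFTA: each set bit j in word i corresponds to
+	 * VLAN ID (i << 5) + j, which is re-applied (or cleared) through
+	 * ixgbe_set_vfta(). */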
+	for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+		vfta = shadow_vfta->vfta[i];
+		if(vfta){
+			mask = 1;
+			for (j = 0; j < 32; j++){
+				if(vfta & mask)
+					ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
+				mask<<=1;
+			}
+		}
+	}
+
+}
+
+static int
+ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vfta * shadow_vfta =
+		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+	uint32_t vid_idx = 0;
+	uint32_t vid_bit = 0;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
+	ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+	if(ret){
+		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+		return ret;
+	}
+	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
+
+	/* Save what we set and restore it after device reset */
+	if (on)
+		shadow_vfta->vfta[vid_idx] |= vid_bit;
+	else
+		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+	return 0;
+}
+
+static void
+ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ctrl;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if(queue >= hw->mac.max_rx_queues)
+		return;
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+	if(on)
+		ctrl |= IXGBE_RXDCTL_VME;
+	else
+		ctrl &= ~IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
+	ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+}
+
+static void
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t i;
+	int on = 0;
+
+	/* The VF only supports HW VLAN stripping; other offloads are not supported. */
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+		for (i = 0; i < hw->mac.max_rx_queues; i++)
+			ixgbevf_vlan_strip_queue_set(dev, i, on);
+	}
+}
+
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+	uint32_t reg_val;
+
+	/* we only need to do this if VMDq is enabled */
+	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+		PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+		return (-1);
+	}
+
+	return 0;
+}
+
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+{
+	uint32_t vector = 0;
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((uc_addr->addr_bytes[4] >> 4) |
+			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((uc_addr->addr_bytes[4] >> 3) |
+			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((uc_addr->addr_bytes[4] >> 2) |
+			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((uc_addr->addr_bytes[4]) |
+			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+			       uint8_t on)
+{
+	uint32_t vector;
+	uint32_t uta_idx;
+	uint32_t reg_val;
+	uint32_t uta_shift;
+	uint32_t rc;
+	const uint32_t ixgbe_uta_idx_mask = 0x7F;
+	const uint32_t ixgbe_uta_bit_shift = 5;
+	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+	const uint32_t bit1 = 0x1;
+
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_uta_info *uta_info =
+		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+	/* The UTA table only exists on 82599 hardware and newer */
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return (-ENOTSUP);
+
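+	/*
+	 * The 12-bit hash vector indexes the 128 x 32-bit UTA table: the upper
+	 * bits select the register, the low 5 bits select the bit within it.
+	 */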
+	vector = ixgbe_uta_vector(hw, mac_addr);
+	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+	uta_shift = vector & ixgbe_uta_bit_mask;
+
+	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+	if (rc == on)
+		return 0;
+
+	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+	if (on) {
+		uta_info->uta_in_use++;
+		reg_val |= (bit1 << uta_shift);
+		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+	} else {
+		uta_info->uta_in_use--;
+		reg_val &= ~(bit1 << uta_shift);
+		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
+	if (uta_info->uta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+
+	return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+	int i;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_uta_info *uta_info =
+		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+	/* The UTA table only exists on 82599 hardware and newer */
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return (-ENOTSUP);
+
+	if(on) {
+		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+			uta_info->uta_shadow[i] = ~0;
+			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+		}
+	} else {
+		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+			uta_info->uta_shadow[i] = 0;
+			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+		}
+	}
+	return 0;
+
+}
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+	uint32_t new_val = orig_val;
+
+	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+		new_val |= IXGBE_VMOLR_AUPE;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+		new_val |= IXGBE_VMOLR_ROMPE;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+		new_val |= IXGBE_VMOLR_ROPE;
+	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+		new_val |= IXGBE_VMOLR_BAM;
+	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+		new_val |= IXGBE_VMOLR_MPE;
+
+	return new_val;
+}
+
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+			       uint16_t rx_mask, uint8_t on)
+{
+	int val = 0;
+
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		PMD_INIT_LOG(ERR, "setting VF receive mode is only supported"
+			     " on 82599 hardware and newer");
+		return (-ENOTSUP);
+	}
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+
+	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+	if (on)
+		vmolr |= val;
+	else
+		vmolr &= ~val;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+
+	return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+	uint32_t reg,addr;
+	uint32_t val;
+	const uint8_t bit1 = 0x1;
+
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+
+	addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+	reg = IXGBE_READ_REG(hw, addr);
+	val = bit1 << pool;
+
+	if (on)
+		reg |= val;
+	else
+		reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr,reg);
+
+	return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+	uint32_t reg,addr;
+	uint32_t val;
+	const uint8_t bit1 = 0x1;
+
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+
+	addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+	reg = IXGBE_READ_REG(hw, addr);
+	val = bit1 << pool;
+
+	if (on)
+		reg |= val;
+	else
+		reg &= ~val;
+
+	IXGBE_WRITE_REG(hw, addr,reg);
+
+	return 0;
+}
+
+static int
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+			uint64_t pool_mask, uint8_t vlan_on)
+{
+	int ret = 0;
+	uint16_t pool_idx;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+	for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+		if (pool_mask & (1ULL << pool_idx)) {
+			ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+			struct rte_eth_vmdq_mirror_conf *mirror_conf,
+			uint8_t rule_id, uint8_t on)
+{
+	uint32_t mr_ctl,vlvf;
+	uint32_t mp_lsb = 0;
+	uint32_t mv_msb = 0;
+	uint32_t mv_lsb = 0;
+	uint32_t mp_msb = 0;
+	uint8_t i = 0;
+	int reg_index = 0;
+	uint64_t vlan_mask = 0;
+
+	const uint8_t pool_mask_offset = 32;
+	const uint8_t vlan_mask_offset = 32;
+	const uint8_t dst_pool_offset = 8;
+	const uint8_t rule_mr_offset  = 4;
+	const uint8_t mirror_rule_mask = 0x0F;
+
+	struct ixgbe_mirror_info *mr_info =
+			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+
+	/* Check if vlan mask is valid */
+	if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+		if (mirror_conf->vlan.vlan_mask == 0)
+			return (-EINVAL);
+	}
+
+	/* Check if the vlan id is valid and find the corresponding VLAN ID index in VLVF */
+	if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+				/* search vlan id related pool vlan filter index */
+				reg_index = ixgbe_find_vlvf_slot(hw,
+						mirror_conf->vlan.vlan_id[i]);
+				if (reg_index < 0)
+					return (-EINVAL);
+				vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+				if ((vlvf & IXGBE_VLVF_VIEN) &&
+					((vlvf & IXGBE_VLVF_VLANID_MASK)
+						== mirror_conf->vlan.vlan_id[i]))
+					vlan_mask |= (1ULL << reg_index);
+				else
+					return (-EINVAL);
+			}
+		}
+
+		if (on) {
+			mv_lsb = vlan_mask & 0xFFFFFFFF;
+			mv_msb = vlan_mask >> vlan_mask_offset;
+
+			mr_info->mr_conf[rule_id].vlan.vlan_mask =
+						mirror_conf->vlan.vlan_mask;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+						mirror_conf->vlan.vlan_id[i];
+			}
+		} else {
+			mv_lsb = 0;
+			mv_msb = 0;
+			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+		}
+	}
+
+	/*
+	 * If pool mirroring is enabled, write the related pool mask register;
+	 * if it is disabled, clear the PFMRVM register.
+	 */
+	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+		if (on) {
+			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+			mr_info->mr_conf[rule_id].pool_mask =
+					mirror_conf->pool_mask;
+
+		} else {
+			mp_lsb = 0;
+			mp_msb = 0;
+			mr_info->mr_conf[rule_id].pool_mask = 0;
+		}
+	}
+
+	/* read mirror control register and recalculate it */
+	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+	if (on) {
+		mr_ctl |= mirror_conf->rule_type_mask;
+		mr_ctl &= mirror_rule_mask;
+		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+	} else
+		mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+	mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	/* write mirror control register */
+	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+	/* write pool mirror control register */
+	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+				mp_msb);
+	}
+	/* write VLAN mirror control register */
+	if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+				mv_msb);
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+	int mr_ctl = 0;
+	uint32_t lsb_val = 0;
+	uint32_t msb_val = 0;
+	const uint8_t rule_mr_offset = 4;
+
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_mirror_info *mr_info =
+		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+	if (ixgbe_vmdq_mode_check(hw) < 0)
+		return (-ENOTSUP);
+
+	memset(&mr_info->mr_conf[rule_id], 0,
+		sizeof(struct rte_eth_vmdq_mirror_conf));
+
+	/* clear PFVMCTL register */
+	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+	/* clear pool mask register */
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+	/* clear vlan mask register */
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+	return 0;
+}
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+	uint16_t queue_idx, uint16_t tx_rate)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t rf_dec, rf_int;
+	uint32_t bcnrc_val;
+	uint16_t link_speed = dev->data->dev_link.link_speed;
+
+	if (queue_idx >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (tx_rate != 0) {
+		/* Calculate the rate factor values to set */
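+		/*
+		 * rf_int is the integer part of link_speed/tx_rate; rf_dec is
+		 * the remainder converted to the fixed-point fraction expected
+		 * by the RTTBCNRC register.
+		 */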
+		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+				IXGBE_RTTBCNRC_RF_INT_MASK_M);
+		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+	 * set as 0x4.
+	 */
+	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+				IXGBE_MAX_JUMBO_FRAME_SIZE))
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_JUMBO_FRAME);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_DEFAULT);
+
+	/* Set RTTBCNRC of queue X */
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+	uint16_t tx_rate, uint64_t q_msk)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint32_t queue_stride =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+	uint16_t total_rate = 0;
+
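+	/*
+	 * Each VF owns a block of queue_stride queues starting at
+	 * vf * queue_stride; the last queue actually used must exist.
+	 */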
+	if (queue_end >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (vfinfo != NULL) {
+		for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+			if (vf_idx == vf)
+				continue;
+			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+				idx++)
+				total_rate += vfinfo[vf_idx].tx_rate[idx];
+		}
+	} else
+		return -EINVAL;
+
+	/* Store tx_rate for this vf. */
+	for (idx = 0; idx < nb_q_per_pool; idx++) {
+		if (((uint64_t)0x1 << idx) & q_msk) {
+			if (vfinfo[vf].tx_rate[idx] != tx_rate)
+				vfinfo[vf].tx_rate[idx] = tx_rate;
+			total_rate += tx_rate;
+		}
+	}
+
+	if (total_rate > dev->data->dev_link.link_speed) {
+		/*
+		 * Reset the stored TX rates of the VF if the total would
+		 * exceed the link speed.
+		 */
+		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+		return -EINVAL;
+	}
+
+	/* Set RTTBCNRC of each queue/pool for vf X  */
+	for (; queue_idx <= queue_end; queue_idx++) {
+		if (0x1 & q_msk)
+			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+		q_msk = q_msk >> 1;
+	}
+
+	return 0;
+}
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		     __attribute__((unused)) uint32_t index,
+		     __attribute__((unused)) uint32_t pool)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int diag;
+
+	/*
+	 * On an 82599 VF, adding the same MAC address again is not an
+	 * idempotent operation. Trap this case to avoid exhausting the
+	 * [very limited] set of PF resources used to store VF MAC addresses.
+	 */
+	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+		return;
+	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+	if (diag == 0)
+		return;
+	PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+	struct ether_addr *mac_addr;
+	uint32_t i;
+	int diag;
+
+	/*
+	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+	 * not support the deletion of a given MAC address.
+	 * Instead, all MAC addresses must be deleted and then re-added,
+	 * with the exception of the one to be deleted.
+	 */
+	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+	/*
+	 * Re-add all MAC addresses, except the deleted one and the
+	 * permanent MAC address.
+	 */
+	for (i = 0, mac_addr = dev->data->mac_addrs;
+	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
+		/* Skip the deleted MAC address */
+		if (i == index)
+			continue;
+		/* Skip NULL MAC addresses */
+		if (is_zero_ether_addr(mac_addr))
+			continue;
+		/* Skip the permanent MAC address */
+		if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+			continue;
+		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+		if (diag != 0)
+			PMD_DRV_LOG(ERR,
+				    "Adding again MAC address "
+				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
+				    "diag=%d",
+				    mac_addr->addr_bytes[0],
+				    mac_addr->addr_bytes[1],
+				    mac_addr->addr_bytes[2],
+				    mac_addr->addr_bytes[3],
+				    mac_addr->addr_bytes[4],
+				    mac_addr->addr_bytes[5],
+				    diag);
+	}
+}
+
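+/* These filter types are only supported on 82599, X540 and X550 MACs. */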
+#define MAC_TYPE_FILTER_SUP(type)    do {\
+	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+		(type) != ixgbe_mac_X550)\
+		return -ENOTSUP;\
+} while (0)
+
+static int
+ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter,
+			bool add)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t synqf;
+
+	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
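+	/*
+	 * There is a single SYNQF register, i.e. only one SYN filter at a time.
+	 */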
+	if (add) {
+		if (synqf & IXGBE_SYN_FILTER_ENABLE)
+			return -EINVAL;
+		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+		if (filter->hig_pri)
+			synqf |= IXGBE_SYN_FILTER_SYNQFP;
+		else
+			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+	} else {
+		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+			return -ENOENT;
+		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+	IXGBE_WRITE_FLUSH(hw);
+	return 0;
+}
+
+static int
+ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+			struct rte_eth_syn_filter *filter)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+		filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+		return 0;
+	}
+	return -ENOENT;
+}
+
+static int
+ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op,
+			void *arg)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+			    filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = ixgbe_syn_filter_set(dev,
+				(struct rte_eth_syn_filter *)arg,
+				TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = ixgbe_syn_filter_set(dev,
+				(struct rte_eth_syn_filter *)arg,
+				FALSE);
+		break;
+	case RTE_ETH_FILTER_GET:
+		ret = ixgbe_syn_filter_get(dev,
+				(struct rte_eth_syn_filter *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+	if (protocol_value == IPPROTO_TCP)
+		return IXGBE_FILTER_PROTOCOL_TCP;
+	else if (protocol_value == IPPROTO_UDP)
+		return IXGBE_FILTER_PROTOCOL_UDP;
+	else if (protocol_value == IPPROTO_SCTP)
+		return IXGBE_FILTER_PROTOCOL_SCTP;
+	else
+		return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; the filter index is
+ *         allocated by this function.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+			struct ixgbe_5tuple_filter *filter)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i, idx, shift;
+	uint32_t ftqf, sdpqf;
+	uint32_t l34timir = 0;
+	uint8_t mask = 0xff;
+
+	/*
+	 * look for an unused 5tuple filter index,
+	 * and insert the filter to list.
+	 */
+	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
+		idx = i / (sizeof(uint32_t) * NBBY);
+		shift = i % (sizeof(uint32_t) * NBBY);
+		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+			filter_info->fivetuple_mask[idx] |= 1 << shift;
+			filter->index = i;
+			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+					  filter,
+					  entries);
+			break;
+		}
+	}
+	if (i >= IXGBE_MAX_FTQF_FILTERS) {
+		PMD_DRV_LOG(ERR, "5tuple filters are full.");
+		return -ENOSYS;
+	}
+
+	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+				IXGBE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
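+	/*
+	 * Build FTQF: protocol, priority and the 5-tuple mask; clearing a mask
+	 * bit (done when the corresponding filter mask is 0) makes the hardware
+	 * compare that field.
+	 */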
+	ftqf = (uint32_t)(filter->filter_info.proto &
+		IXGBE_FTQF_PROTOCOL_MASK);
+	ftqf |= (uint32_t)((filter->filter_info.priority &
+		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+	if (filter->filter_info.dst_ip_mask == 0)
+		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+	if (filter->filter_info.src_port_mask == 0)
+		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+	if (filter->filter_info.dst_port_mask == 0)
+		mask &= IXGBE_FTQF_DEST_PORT_MASK;
+	if (filter->filter_info.proto_mask == 0)
+		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+	l34timir |= IXGBE_L34T_IMIR_RESERVE;
+	l34timir |= (uint32_t)(filter->queue <<
+				IXGBE_L34T_IMIR_QUEUE_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+	return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter to be removed.
+ */
+static void
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+			struct ixgbe_5tuple_filter *filter)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint16_t index = filter->index;
+
+	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+				~(1 << (index % (sizeof(uint32_t) * NBBY)));
+	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+	rte_free(filter);
+
+	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct ixgbe_hw *hw;
+	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+		return -EINVAL;
+
+	/* Refuse an MTU that requires scattered packet support when this
+	 * feature has not been enabled beforehand. */
+	if (!dev->data->scattered_rx &&
+	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+		return -EINVAL;
+
+	/*
+	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+	 * request of the version 2.0 of the mailbox API.
+	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+	 * of the mailbox API.
+	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
+	 * prior to 3.11.33 which contains the following change:
+	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+	 */
+	ixgbevf_rlpml_set_vf(hw, max_frame);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+	return 0;
+}
+
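+/* Stricter check used by the ntuple filter path: 82599 and X540 only. */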
+#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
+	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+		return -ENOTSUP;\
+} while (0)
+
+static inline struct ixgbe_5tuple_filter *
+ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
+			struct ixgbe_5tuple_filter_info *key)
+{
+	struct ixgbe_5tuple_filter *it;
+
+	TAILQ_FOREACH(it, filter_list, entries) {
+		if (memcmp(key, &it->filter_info,
+			sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
+			return it;
+		}
+	}
+	return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+			struct ixgbe_5tuple_filter_info *filter_info)
+{
+	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+		filter->priority < IXGBE_5TUPLE_MIN_PRI)
+		return -EINVAL;
+
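+	/*
+	 * In ixgbe_5tuple_filter_info a mask bit of 1 means "do not compare
+	 * this field": a full software mask therefore maps to hw mask 0 and
+	 * an all-zero software mask maps to hw mask 1.
+	 */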
+	switch (filter->dst_ip_mask) {
+	case UINT32_MAX:
+		filter_info->dst_ip_mask = 0;
+		filter_info->dst_ip = filter->dst_ip;
+		break;
+	case 0:
+		filter_info->dst_ip_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->src_ip_mask) {
+	case UINT32_MAX:
+		filter_info->src_ip_mask = 0;
+		filter_info->src_ip = filter->src_ip;
+		break;
+	case 0:
+		filter_info->src_ip_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->dst_port_mask) {
+	case UINT16_MAX:
+		filter_info->dst_port_mask = 0;
+		filter_info->dst_port = filter->dst_port;
+		break;
+	case 0:
+		filter_info->dst_port_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->src_port_mask) {
+	case UINT16_MAX:
+		filter_info->src_port_mask = 0;
+		filter_info->src_port = filter->src_port;
+		break;
+	case 0:
+		filter_info->src_port_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid src_port mask.");
+		return -EINVAL;
+	}
+
+	switch (filter->proto_mask) {
+	case UINT8_MAX:
+		filter_info->proto_mask = 0;
+		filter_info->proto =
+			convert_protocol_type(filter->proto);
+		break;
+	case 0:
+		filter_info->proto_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid protocol mask.");
+		return -EINVAL;
+	}
+
+	filter_info->priority = (uint8_t)filter->priority;
+	return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter,
+			bool add)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter_info filter_5tuple;
+	struct ixgbe_5tuple_filter *filter;
+	int ret;
+
+	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+		return -EINVAL;
+	}
+
+	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+	if (ret < 0)
+		return ret;
+
+	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+					 &filter_5tuple);
+	if (filter != NULL && add) {
+		PMD_DRV_LOG(ERR, "filter exists.");
+		return -EEXIST;
+	}
+	if (filter == NULL && !add) {
+		PMD_DRV_LOG(ERR, "filter doesn't exist.");
+		return -ENOENT;
+	}
+
+	if (add) {
+		filter = rte_zmalloc("ixgbe_5tuple_filter",
+				sizeof(struct ixgbe_5tuple_filter), 0);
+		if (filter == NULL)
+			return -ENOMEM;
+		(void)rte_memcpy(&filter->filter_info,
+				 &filter_5tuple,
+				 sizeof(struct ixgbe_5tuple_filter_info));
+		filter->queue = ntuple_filter->queue;
+		ret = ixgbe_add_5tuple_filter(dev, filter);
+		if (ret < 0) {
+			rte_free(filter);
+			return ret;
+		}
+	} else
+		ixgbe_remove_5tuple_filter(dev, filter);
+
+	return 0;
+}
+
+/*
+ * get a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter_info filter_5tuple;
+	struct ixgbe_5tuple_filter *filter;
+	int ret;
+
+	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+		return -EINVAL;
+	}
+
+	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+	if (ret < 0)
+		return ret;
+
+	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+					 &filter_5tuple);
+	if (filter == NULL) {
+		PMD_DRV_LOG(ERR, "filter doesn't exist.");
+		return -ENOENT;
+	}
+	ntuple_filter->queue = filter->queue;
+	return 0;
+}
+
+/*
+ * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+				enum rte_filter_op filter_op,
+				void *arg)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+			    filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = ixgbe_add_del_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg,
+			TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = ixgbe_add_del_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg,
+			FALSE);
+		break;
+	case RTE_ETH_FILTER_GET:
+		ret = ixgbe_get_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
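+/*
+ * ETQF slots are tracked in software: ethertype_mask marks the slots in use
+ * and ethertype_filters[] shadows the ethertype programmed in each slot.
+ */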
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+			uint16_t ethertype)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_filters[i] == ethertype &&
+		    (filter_info->ethertype_mask & (1 << i)))
+			return i;
+	}
+	return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+			uint16_t ethertype)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (!(filter_info->ethertype_mask & (1 << i))) {
+			filter_info->ethertype_mask |= 1 << i;
+			filter_info->ethertype_filters[i] = ethertype;
+			return i;
+		}
+	}
+	return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+			uint8_t idx)
+{
+	if (idx >= IXGBE_MAX_ETQF_FILTERS)
+		return -1;
+	filter_info->ethertype_mask &= ~(1 << idx);
+	filter_info->ethertype_filters[idx] = 0;
+	return idx;
+}
+
+static int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t etqf = 0;
+	uint32_t etqs = 0;
+	int ret;
+
+	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	if (filter->ether_type == ETHER_TYPE_IPv4 ||
+		filter->ether_type == ETHER_TYPE_IPv6) {
+		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+			" ethertype filter.", filter->ether_type);
+		return -EINVAL;
+	}
+
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+		return -EINVAL;
+	}
+	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		PMD_DRV_LOG(ERR, "drop option is unsupported.");
+		return -EINVAL;
+	}
+
+	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret >= 0 && add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+			    filter->ether_type);
+		return -EEXIST;
+	}
+	if (ret < 0 && !add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			    filter->ether_type);
+		return -ENOENT;
+	}
+
+	if (add) {
+		ret = ixgbe_ethertype_filter_insert(filter_info,
+			filter->ether_type);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "ethertype filters are full.");
+			return -ENOSYS;
+		}
+		etqf = IXGBE_ETQF_FILTER_EN;
+		etqf |= (uint32_t)filter->ether_type;
+		etqs |= (uint32_t)((filter->queue <<
+				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
+				    IXGBE_ETQS_RX_QUEUE);
+		etqs |= IXGBE_ETQS_QUEUE_EN;
+	} else {
+		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+		if (ret < 0)
+			return -ENOSYS;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t etqf, etqs;
+	int ret;
+
+	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			    filter->ether_type);
+		return -ENOENT;
+	}
+
+	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+	if (etqf & IXGBE_ETQF_FILTER_EN) {
+		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+		filter->flags = 0;
+		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+			       IXGBE_ETQS_RX_QUEUE_SHIFT;
+		return 0;
+	}
+	return -ENOENT;
+}
+
+/*
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+				enum rte_filter_op filter_op,
+				void *arg)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+			    filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = ixgbe_add_del_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg,
+			TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = ixgbe_add_del_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg,
+			FALSE);
+		break;
+	case RTE_ETH_FILTER_GET:
+		ret = ixgbe_get_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_SYN:
+		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+							filter_type);
+		break;
+	}
+
+	return ret;
+}
+
+static struct rte_driver rte_ixgbe_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
new file mode 100644
index 0000000..19237b8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -0,0 +1,400 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_ETHDEV_H_
+#define _IXGBE_ETHDEV_H_
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_dcb_82599.h"
+#include "base/ixgbe_dcb_82598.h"
+#include "ixgbe_bypass.h"
+
+/* need update link, bit flag */
+#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
+
+/*
+ * Defines that were not part of ixgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define IXGBE_ADVTXD_MAC_1588       0x00080000 /* IEEE1588 Timestamp packet */
+#define IXGBE_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE, resvd  */
+#define IXGBE_RXDADV_ERR_CKSUM_BIT  30
+#define IXGBE_RXDADV_ERR_CKSUM_MSK  3
+#define IXGBE_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
+#define IXGBE_NB_STAT_MAPPING_REGS  32
+#define IXGBE_EXTENDED_VLAN	  (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
+#define IXGBE_VFTA_SIZE 128
+#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_MAX_RX_QUEUE_NUM	128
+#ifndef NBBY
+#define NBBY	8	/* number of bits in a byte */
+#endif
+#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
+/* EITR interval is in 2048 ns units for 1G and 10G links */
+#define IXGBE_EITR_INTERVAL_UNIT_NS	2048
+#define IXGBE_EITR_ITR_INT_SHIFT       3
+#define IXGBE_EITR_INTERVAL_US(us) \
+	(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
+		IXGBE_EITR_ITR_INT_MASK)
+
+
+/* Loopback operation modes */
+/* 82599 specific loopback operation types */
+#define IXGBE_LPBK_82599_NONE   0x0 /* Default value. Loopback is disabled. */
+#define IXGBE_LPBK_82599_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE      0x2600 /* Maximum Jumbo frame size. */
+
+#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
+#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
+	(IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
+#define IXGBE_MAX_QUEUE_NUM_PER_VF  8
+
+#define IXGBE_SYN_FILTER_ENABLE         0x00000001 /* syn filter enable field */
+#define IXGBE_SYN_FILTER_QUEUE          0x000000FE /* syn filter queue field */
+#define IXGBE_SYN_FILTER_QUEUE_SHIFT    1          /* syn filter queue field shift */
+#define IXGBE_SYN_FILTER_SYNQFP         0x80000000 /* syn filter SYNQFP */
+
+#define IXGBE_ETQF_UP                   0x00070000 /* ethertype filter priority field */
+#define IXGBE_ETQF_SHIFT                16
+#define IXGBE_ETQF_UP_EN                0x00080000
+#define IXGBE_ETQF_ETHERTYPE            0x0000FFFF /* ethertype filter ethertype field */
+#define IXGBE_ETQF_MAX_PRI              7
+
+#define IXGBE_SDPQF_DSTPORT             0xFFFF0000 /* dst port field */
+#define IXGBE_SDPQF_DSTPORT_SHIFT       16         /* dst port field shift */
+#define IXGBE_SDPQF_SRCPORT             0x0000FFFF /* src port field */
+
+#define IXGBE_L34T_IMIR_SIZE_BP         0x00001000
+#define IXGBE_L34T_IMIR_RESERVE         0x00080000 /* bit 13 to 19 must be set to 1000000b. */
+#define IXGBE_L34T_IMIR_LLI             0x00100000
+#define IXGBE_L34T_IMIR_QUEUE           0x0FE00000
+#define IXGBE_L34T_IMIR_QUEUE_SHIFT     21
+#define IXGBE_5TUPLE_MAX_PRI            7
+#define IXGBE_5TUPLE_MIN_PRI            1
+
+#define IXGBE_RSS_OFFLOAD_ALL ( \
+	ETH_RSS_IPV4 | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV6_UDP | \
+	ETH_RSS_IPV6_EX | \
+	ETH_RSS_IPV6_TCP_EX | \
+	ETH_RSS_IPV6_UDP_EX)
+
+/*
+ * Information about the fdir mode.
+ */
+
+struct ixgbe_hw_fdir_mask {
+	uint16_t vlan_tci_mask;
+	uint32_t src_ipv4_mask;
+	uint32_t dst_ipv4_mask;
+	uint16_t src_ipv6_mask;
+	uint16_t dst_ipv6_mask;
+	uint16_t src_port_mask;
+	uint16_t dst_port_mask;
+	uint16_t flex_bytes_mask;
+};
+
+struct ixgbe_hw_fdir_info {
+	struct ixgbe_hw_fdir_mask mask;
+	uint8_t     flex_bytes_offset;
+	uint16_t    collision;
+	uint16_t    free;
+	uint16_t    maxhash;
+	uint8_t     maxlen;
+	uint64_t    add;
+	uint64_t    remove;
+	uint64_t    f_add;
+	uint64_t    f_remove;
+};
+
+/* structure for interrupt relative data */
+struct ixgbe_interrupt {
+	uint32_t flags;
+	uint32_t mask;
+};
+
+struct ixgbe_stat_mapping_registers {
+	uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
+	uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
+};
+
+struct ixgbe_vfta {
+	uint32_t vfta[IXGBE_VFTA_SIZE];
+};
+
+struct ixgbe_hwstrip {
+	uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
+};
+
+/*
+ * VF data used by the PF host only
+ */
+#define IXGBE_MAX_VF_MC_ENTRIES		30
+#define IXGBE_MAX_MR_RULE_ENTRIES	4 /* number of mirroring rules supported */
+#define IXGBE_MAX_UTA                   128
+
+struct ixgbe_uta_info {
+	uint8_t  uc_filter_type;
+	uint16_t uta_in_use;
+	uint32_t uta_shadow[IXGBE_MAX_UTA];
+};
+
+struct ixgbe_mirror_info {
+	struct rte_eth_vmdq_mirror_conf mr_conf[ETH_VMDQ_NUM_MIRROR_RULE];
+	/**< store PF mirror rules configuration*/
+};
+
+struct ixgbe_vf_info {
+	uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+	uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+	uint16_t num_vf_mc_hashes;
+	uint16_t default_vf_vlan_id;
+	uint16_t vlans_enabled;
+	bool clear_to_send;
+	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
+	uint16_t vlan_count;
+	uint8_t spoofchk_enabled;
+	uint8_t api_version;
+};
+
+/*
+ * Possible L4 protocol types of 5tuple filters.
+ */
+enum ixgbe_5tuple_protocol {
+	IXGBE_FILTER_PROTOCOL_TCP = 0,
+	IXGBE_FILTER_PROTOCOL_UDP,
+	IXGBE_FILTER_PROTOCOL_SCTP,
+	IXGBE_FILTER_PROTOCOL_NONE,
+};
+
+TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);
+
+struct ixgbe_5tuple_filter_info {
+	uint32_t dst_ip;
+	uint32_t src_ip;
+	uint16_t dst_port;
+	uint16_t src_port;
+	enum ixgbe_5tuple_protocol proto;        /* l4 protocol. */
+	uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
+				      used when more than one filter matches. */
+	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
+		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
+		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+		src_port_mask:1, /* if mask is 1b, do not compare src port. */
+		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct ixgbe_5tuple_filter {
+	TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
+	uint16_t index;       /* the index of 5tuple filter */
+	struct ixgbe_5tuple_filter_info filter_info;
+	uint16_t queue;       /* rx queue assigned to */
+};
+
+#define IXGBE_5TUPLE_ARRAY_SIZE \
+	(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+	 (sizeof(uint32_t) * NBBY))
+
+/*
+ * Structure to store filters' info.
+ */
+struct ixgbe_filter_info {
+	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
+	/* store used ethertype filters*/
+	uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+	/* Bit mask for every used 5tuple filter */
+	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
+	struct ixgbe_5tuple_filter_list fivetuple_list;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct ixgbe_adapter {
+	struct ixgbe_hw             hw;
+	struct ixgbe_hw_stats       stats;
+	struct ixgbe_hw_fdir_info   fdir;
+	struct ixgbe_interrupt      intr;
+	struct ixgbe_stat_mapping_registers stat_mappings;
+	struct ixgbe_vfta           shadow_vfta;
+	struct ixgbe_hwstrip		hwstrip;
+	struct ixgbe_dcb_config     dcb_config;
+	struct ixgbe_mirror_info    mr_data;
+	struct ixgbe_vf_info        *vfdata;
+	struct ixgbe_uta_info       uta_info;
+#ifdef RTE_NIC_BYPASS
+	struct ixgbe_bypass_info    bps;
+#endif /* RTE_NIC_BYPASS */
+	struct ixgbe_filter_info    filter;
+
+	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
+};
+
+#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
+	(&((struct ixgbe_adapter *)adapter)->hw)
+
+#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->stats)
+
+#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->intr)
+
+#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->fdir)
+
+#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->stat_mappings)
+
+#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->shadow_vfta)
+
+#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->hwstrip)
+
+#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->dcb_config)
+
+#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->vfdata)
+
+#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->mr_data)
+
+#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->uta_info)
+
+#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->filter)
+
+/*
+ * RX/TX function prototypes
+ */
+void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+void ixgbe_dev_rx_queue_release(void *rxq);
+
+void ixgbe_dev_tx_queue_release(void *txq);
+
+int  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+		uint16_t nb_rx_desc, unsigned int socket_id,
+		const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mb_pool);
+
+int  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+		uint16_t nb_tx_desc, unsigned int socket_id,
+		const struct rte_eth_txconf *tx_conf);
+
+uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+		uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+			      struct rte_eth_rss_conf *rss_conf);
+
+int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+				struct rte_eth_rss_conf *rss_conf);
+
+/*
+ * Flow director function prototypes
+ */
+int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+
+void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+
+/*
+ * misc function prototypes
+ */
+void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
+
+void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
+int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op, void *arg);
+#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
new file mode 100644
index 0000000..db48817
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -0,0 +1,1144 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
+#define FDIRCTRL_PBALLOC_MASK           0x03
+
+/* For calculating memory required for FDIR filters */
+#define PBALLOC_SIZE_SHIFT              15
+
+/* Number of bits used to mask bucket hash for different pballoc sizes */
+#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
+#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
+#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
+#define IXGBE_MAX_FLX_SOURCE_OFF        62
+#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
+#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
+
+#define IXGBE_FDIR_FLOW_TYPES ( \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+	uint8_t ipv6_addr[16]; \
+	uint8_t i; \
+	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+	(ipv6m) = 0; \
+	for (i = 0; i < sizeof(ipv6_addr); i++) { \
+		if (ipv6_addr[i] == UINT8_MAX) \
+			(ipv6m) |= 1 << i; \
+		else if (ipv6_addr[i] != 0) { \
+			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+			return -EINVAL; \
+		} \
+	} \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+	uint8_t ipv6_addr[16]; \
+	uint8_t i; \
+	for (i = 0; i < sizeof(ipv6_addr); i++) { \
+		if ((ipv6m) & (1 << i)) \
+			ipv6_addr[i] = UINT8_MAX; \
+		else \
+			ipv6_addr[i] = 0; \
+	} \
+	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
+
+static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
+		const struct rte_eth_fdir_masks *input_mask);
+static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+		const struct rte_eth_fdir_flex_conf *conf);
+static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
+static int ixgbe_fdir_filter_to_atr_input(
+		const struct rte_eth_fdir_filter *fdir_filter,
+		union ixgbe_atr_input *input);
+static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+				 uint32_t key);
+static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+		enum rte_fdir_pballoc_type pballoc);
+static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+		enum rte_fdir_pballoc_type pballoc);
+static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+			union ixgbe_atr_input *input, uint8_t queue,
+			uint32_t fdircmd, uint32_t fdirhash);
+static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+		uint32_t fdirhash);
+static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct rte_eth_fdir_filter *fdir_filter,
+			      bool del,
+			      bool update);
+static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
+static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
+			struct rte_eth_fdir_info *fdir_info);
+static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
+			struct rte_eth_fdir_stats *fdir_stats);
+
+/**
+ * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
+ * It adds extra configuration of fdirctrl that is common for all filter types.
+ *
+ *  Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ **/
+static int
+fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
+{
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Prime the keys for hashing */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 filters are left
+	 */
+	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	/*
+	 * Poll init-done after we write the register.  Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		                   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
+			"during enabling!");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+{
+	*fdirctrl = 0;
+
+	switch (conf->pballoc) {
+	case RTE_FDIR_PBALLOC_64K:
+		/* 8k - 1 signature filters */
+		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+		break;
+	case RTE_FDIR_PBALLOC_128K:
+		/* 16k - 1 signature filters */
+		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+		break;
+	case RTE_FDIR_PBALLOC_256K:
+		/* 32k - 1 signature filters */
+		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+		break;
+	default:
+		/* bad value */
+		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+		return -EINVAL;
+	};
+
+	/* status flags: write hash & swindex in the rx descriptor */
+	switch (conf->status) {
+	case RTE_FDIR_NO_REPORT_STATUS:
+		/* do nothing, default mode */
+		break;
+	case RTE_FDIR_REPORT_STATUS:
+		/* report status when the packet matches a fdir rule */
+		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+		break;
+	case RTE_FDIR_REPORT_STATUS_ALWAYS:
+		/* always report status */
+		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
+		break;
+	default:
+		/* bad value */
+		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+		return -EINVAL;
+	};
+
+	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
+		     IXGBE_FDIRCTRL_FLEX_SHIFT;
+
+	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+	}
+	/*
+	 * Continue setup of fdirctrl register bits:
+	 *  Set the maximum length per hash bucket to 0xA filters
+	 *  Send interrupt when 64 filters are left
+	 */
+	*fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+	return 0;
+}
+
+/**
+ * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
+ *
+ *  @hi_dword: Bits 31:16 mask to be bit swapped.
+ *  @lo_dword: Bits 15:0  mask to be bit swapped.
+ *
+ *  Flow director uses several registers to store 2 x 16 bit masks with the
+ *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
+ *  mask affects the MS bit/byte of the target. This function reverses the
+ *  bits in these masks.
+ **/
+static inline uint32_t
+reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
+{
+	uint32_t mask = hi_dword << 16;
+	mask |= lo_dword;
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
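+
+/*
+ * Illustrative sketch, not part of this patch: the two 16-bit masks are
+ * bit-reversed independently, keeping the dst mask in the upper and the src
+ * mask in the lower half-word, e.g. reverse_fdir_bitmasks(0x8000, 0x0001)
+ * yields 0x00018000. The helper name below is hypothetical.
+ */
+static inline int
+ixgbe_example_bitmask_reversal_ok(void)
+{
+	return reverse_fdir_bitmasks(0x8000, 0x0001) == 0x00018000 &&
+	       reverse_fdir_bitmasks(0xFFFF, 0x0000) == 0xFFFF0000;
+}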
+
+/*
+ * This is based on ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+static int
+fdir_set_input_mask_82599(struct rte_eth_dev *dev,
+		const struct rte_eth_fdir_masks *input_mask)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *info =
+			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	/*
+	 * mask VM pool and DIPv6 since they are currently not supported.
+	 * Also mask the FLEX byte; it will be set in flex_conf.
+	 */
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
+	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+	uint16_t dst_ipv6m = 0;
+	uint16_t src_ipv6m = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/*
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field. Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
+	 */
+	if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+
+	if (input_mask->vlan_tci_mask == 0x0FFF)
+		/* mask VLAN Priority */
+		fdirm |= IXGBE_FDIRM_VLANP;
+	else if (input_mask->vlan_tci_mask == 0xE000)
+		/* mask VLAN ID */
+		fdirm |= IXGBE_FDIRM_VLANID;
+	else if (input_mask->vlan_tci_mask == 0)
+		/* mask VLAN ID and Priority */
+		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
+	else if (input_mask->vlan_tci_mask != 0xEFFF) {
+		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
+		return -EINVAL;
+	}
+	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
+					 input_mask->src_port_mask);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+	info->mask.src_port_mask = input_mask->src_port_mask;
+	info->mask.dst_port_mask = input_mask->dst_port_mask;
+
+	/* Store source and destination IPv4 masks (big-endian) */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
+	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+
+	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+		/*
+		 * IPv6 mask is only meaningful in signature mode
+		 * Store source and destination IPv6 masks (bit reversed)
+		 */
+		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+		fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
+		info->mask.src_ipv6_mask = src_ipv6m;
+		info->mask.dst_ipv6_mask = dst_ipv6m;
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/*
+ * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
+ * configuration arguments are valid, and program them into the hardware
+ */
+static int
+ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
+		const struct rte_eth_fdir_flex_conf *conf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *info =
+			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	const struct rte_eth_flex_payload_cfg *flex_cfg;
+	const struct rte_eth_fdir_flex_mask *flex_mask;
+	uint32_t fdirctrl, fdirm;
+	uint16_t flexbytes = 0;
+	uint16_t i;
+
+	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
+
+	if (conf == NULL) {
+		PMD_DRV_LOG(INFO, "NULL pointer.");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < conf->nb_payloads; i++) {
+		flex_cfg = &conf->flex_set[i];
+		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+			PMD_DRV_LOG(ERR, "unsupported payload type.");
+			return -EINVAL;
+		}
+		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
+			fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+			fdirctrl |= (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
+					IXGBE_FDIRCTRL_FLEX_SHIFT;
+		} else {
+			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		flex_mask = &conf->flex_mask[i];
+		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+			return -EINVAL;
+		}
+		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
+					((flex_mask->mask[1]) & 0xFF));
+		if (flexbytes == UINT16_MAX)
+			fdirm &= ~IXGBE_FDIRM_FLEX;
+		else if (flexbytes != 0) {
+			/* IXGBE_FDIRM_FLEX is set by default when the mask is set */
+			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
+			return -EINVAL;
+		}
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+	info->flex_bytes_offset = (uint8_t)((fdirctrl &
+					    IXGBE_FDIRCTRL_FLEX_MASK) >>
+					    IXGBE_FDIRCTRL_FLEX_SHIFT);
+	return 0;
+}
+
+int
+ixgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int err;
+	uint32_t fdirctrl, pbsize;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+		hw->mac.type != ixgbe_mac_X540 &&
+		hw->mac.type != ixgbe_mac_X550 &&
+		hw->mac.type != ixgbe_mac_X550EM_x)
+		return -ENOSYS;
+
+	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+	if (err)
+		return err;
+
+	/*
+	 * Before enabling Flow Director, the Rx Packet Buffer size
+	 * must be reduced.  The new value is the current size minus
+	 * flow director memory usage size.
+	 */
+	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+	/*
+	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
+	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
+	 * would be bigger than programmed and filter space would run into
+	 * the PB 0 region.
+	 */
+	for (i = 1; i < 8; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+	err = fdir_set_input_mask_82599(dev, &dev->data->dev_conf.fdir_conf.mask);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, " Error on setting FD mask");
+		return err;
+	}
+	err = ixgbe_set_fdir_flex_conf(dev,
+		&dev->data->dev_conf.fdir_conf.flex_conf);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
+		return err;
+	}
+
+	err = fdir_enable_82599(hw, fdirctrl);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, " Error on enabling FD.");
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
+ * by the IXGBE driver code.
+ */
+static int
+ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
+		union ixgbe_atr_input *input)
+{
+	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
+	input->formatted.flex_bytes = (uint16_t)(
+		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
+
+	switch (fdir_filter->input.flow_type) {
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " Error on flow_type input");
+		return -EINVAL;
+	}
+
+	switch (fdir_filter->input.flow_type) {
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+		input->formatted.src_port =
+			fdir_filter->input.flow.udp4_flow.src_port;
+		input->formatted.dst_port =
+			fdir_filter->input.flow.udp4_flow.dst_port;
+	/*
+	 * Fall through: for the SCTP flow type, port and verify_tag are
+	 * meaningless in ixgbe.
+	 */
+	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+		input->formatted.src_ip[0] =
+			fdir_filter->input.flow.ip4_flow.src_ip;
+		input->formatted.dst_ip[0] =
+			fdir_filter->input.flow.ip4_flow.dst_ip;
+		break;
+
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+		input->formatted.src_port =
+			fdir_filter->input.flow.udp6_flow.src_port;
+		input->formatted.dst_port =
+			fdir_filter->input.flow.udp6_flow.dst_port;
+	/*
+	 * Fall through: for the SCTP flow type, port and verify_tag are
+	 * meaningless in ixgbe.
+	 */
+	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+		rte_memcpy(input->formatted.src_ip,
+			   fdir_filter->input.flow.ipv6_flow.src_ip,
+			   sizeof(input->formatted.src_ip));
+		rte_memcpy(input->formatted.dst_ip,
+			   fdir_filter->input.flow.ipv6_flow.dst_ip,
+			   sizeof(input->formatted.dst_ip));
+		break;
+	default:
+		PMD_DRV_LOG(ERR, " Error on flow_type input");
+		return -EINVAL;
+	}
+
+	return 0;
+}
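+
+/*
+ * Illustrative sketch, not part of this patch: flexbytes[0] from the filter
+ * becomes the low byte and flexbytes[1] the high byte of
+ * formatted.flex_bytes, mirroring the packing done above. The helper name
+ * is hypothetical.
+ */
+static inline uint16_t
+ixgbe_example_pack_flexbytes(const uint8_t flexbytes[2])
+{
+	return (uint16_t)((flexbytes[1] << 8) | flexbytes[0]);
+}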
+
+/*
+ * The below function is taken from the FreeBSD IXGBE drivers release
+ * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
+ * before returning, as the signature hash can use the full 16 bits.
+ *
+ * The newer driver has optimised functions for calculating bucket and
+ * signature hashes. However they don't support IPv6 type packets for signature
+ * filters so are not used here.
+ *
+ * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ *  @stream: input bitstream to compute the hash on
+ *  @key: 32-bit hash key
+ **/
+static uint32_t
+ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+				 uint32_t key)
+{
+	/*
+	 * The algorithm is as follows:
+	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+	 *    and A[n] x B[n] is bitwise AND between same length strings
+	 *
+	 *    K[n] is 16 bits, defined as:
+	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+	 *       for n modulo 32 < 15, K[n] =
+	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+	 *
+	 *    S[n] is 16 bits, defined as:
+	 *       for n >= 15, S[n] = S[n:n - 15]
+	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+	 *
+	 *    To simplify for programming, the algorithm is implemented
+	 *    in software this way:
+	 *
+	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+	 *
+	 *    for (i = 0; i < 352; i+=32)
+	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
+	 *
+	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
+	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+	 *
+	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
+	 *
+	 *    if(key[0])
+	 *        hash[15:0] ^= Stream[15:0];
+	 *
+	 *    for (i = 0; i < 16; i++) {
+	 *        if (key[i])
+	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
+	 *        if (key[i + 16])
+	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
+	 *    }
+	 *
+	 */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
+
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
+
+	/* generate common hash dword */
+	for (i = 1; i <= 13; i++)
+		common_hash_dword ^= atr_input->dword_stream[i];
+
+	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
+
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i-- ) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+	}
+
+	return hash_result;
+}
+
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+		enum rte_fdir_pballoc_type pballoc)
+{
+	if (pballoc == RTE_FDIR_PBALLOC_256K)
+		return ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				PERFECT_BUCKET_256KB_HASH_MASK;
+	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+		return ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				PERFECT_BUCKET_128KB_HASH_MASK;
+	else
+		return ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				PERFECT_BUCKET_64KB_HASH_MASK;
+}
+
+/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ */
+static inline int
+ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+			return 0;
+		rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
+	}
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
+ * doesn't support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+		enum rte_fdir_pballoc_type pballoc)
+{
+	uint32_t bucket_hash, sig_hash;
+
+	if (pballoc == RTE_FDIR_PBALLOC_256K)
+		bucket_hash = ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				SIG_BUCKET_256KB_HASH_MASK;
+	else if (pballoc == RTE_FDIR_PBALLOC_128K)
+		bucket_hash = ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				SIG_BUCKET_128KB_HASH_MASK;
+	else
+		bucket_hash = ixgbe_atr_compute_hash_82599(input,
+				IXGBE_ATR_BUCKET_HASH_KEY) &
+				SIG_BUCKET_64KB_HASH_MASK;
+
+	sig_hash = ixgbe_atr_compute_hash_82599(input,
+			IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
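+
+/*
+ * Illustrative sketch, not part of this patch: the value built above matches
+ * the FDIRHASH register layout, with the signature hash in bits 31:16 and
+ * the bucket hash in bits 15:0. The helper names below are hypothetical.
+ */
+static inline uint16_t
+ixgbe_example_fdirhash_sig(uint32_t fdirhash)
+{
+	return (uint16_t)(fdirhash >> IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT);
+}
+
+static inline uint16_t
+ixgbe_example_fdirhash_bucket(uint32_t fdirhash)
+{
+	return (uint16_t)(fdirhash & 0xFFFF);
+}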
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
+ */
+static int
+fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+			union ixgbe_atr_input *input, uint8_t queue,
+			uint32_t fdircmd, uint32_t fdirhash)
+{
+	uint32_t fdirport, fdirvlan;
+	int err = 0;
+
+	/* record the IPv4 address (big-endian) */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record source and destination port (little-endian)*/
+	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record vlan (little-endian) and flex_bytes(big-endian) */
+	fdirvlan = input->formatted.flex_bytes;
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* configure FDIRHASH register */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/*
+	 * flush all previous writes to make certain registers are
+	 * programmed prior to issuing the command
+	 */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* configure FDIRCMD register */
+	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err < 0)
+		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+	return err;
+}
+
+/**
+ * This function is based on ixgbe_atr_add_signature_filter_82599() in
+ * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRCMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type of
+ * 00 (i.e. not TCP, UDP, or SCTP) is not supported; in practice, however,
+ * it appears to work correctly.
+ *
+ *  Adds a signature hash filter
+ *  @hw: pointer to hardware structure
+ *  @input: unique input dword
+ *  @queue: queue index to direct traffic to
+ *  @fdircmd: any extra flags to set in fdircmd register
+ *  @fdirhash: pre-calculated hash value for the filter
+ **/
+static int
+fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
+		uint32_t fdirhash)
+{
+	int err = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* configure FDIRCMD register */
+	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err < 0)
+		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+	return err;
+}
+
+/*
+ * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
+ * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ */
+static int
+fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
+{
+	uint32_t fdircmd = 0;
+	int err = 0;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+	/* flush hash to HW */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Query if filter is present */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err < 0) {
+		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
+		return err;
+	}
+
+	/* if filter exists in hardware then remove it */
+	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+		IXGBE_WRITE_FLUSH(hw);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+	}
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err < 0)
+		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
+	return err;
+
+}
+
+/*
+ * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+			      const struct rte_eth_fdir_filter *fdir_filter,
+			      bool del,
+			      bool update)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fdircmd_flags;
+	uint32_t fdirhash;
+	union ixgbe_atr_input input;
+	uint8_t queue;
+	bool is_perfect = FALSE;
+	int err;
+
+	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+		return -ENOTSUP;
+
+	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+		is_perfect = TRUE;
+
+	memset(&input, 0, sizeof(input));
+
+	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
+	if (err)
+		return err;
+
+	if (is_perfect) {
+		if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
+					 " perfect mode!");
+			return -ENOTSUP;
+		}
+		fdirhash = atr_compute_perfect_hash_82599(&input,
+				dev->data->dev_conf.fdir_conf.pballoc);
+		fdirhash |= fdir_filter->soft_id <<
+				IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+	} else
+		fdirhash = atr_compute_sig_hash_82599(&input,
+				dev->data->dev_conf.fdir_conf.pballoc);
+
+	if (del) {
+		err = fdir_erase_filter_82599(hw, fdirhash);
+		if (err < 0)
+			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+		else
+			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
+		return err;
+	}
+	/* add or update an fdir filter*/
+	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+		if (is_perfect) {
+			queue = dev->data->dev_conf.fdir_conf.drop_queue;
+			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
+		} else {
+			PMD_DRV_LOG(ERR, "Drop option is not supported in"
+				" signature mode.");
+			return -EINVAL;
+		}
+	} else if (fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+		queue = (uint8_t)fdir_filter->action.rx_queue;
+	else
+		return -EINVAL;
+
+	if (is_perfect) {
+		err = fdir_write_perfect_filter_82599(hw, &input, queue,
+				fdircmd_flags, fdirhash);
+	} else {
+		err = fdir_add_signature_filter_82599(hw, &input, queue,
+				fdircmd_flags, fdirhash);
+	}
+	if (err < 0)
+		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
+	else
+		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+
+	return err;
+}
+
+static int
+ixgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *info =
+			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	int ret;
+
+	ret = ixgbe_reinit_fdir_tables_82599(hw);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+		return ret;
+	}
+
+	info->f_add = 0;
+	info->f_remove = 0;
+	info->add = 0;
+	info->remove = 0;
+
+	return ret;
+}
+
+#define FDIRENTRIES_NUM_SHIFT 10
+static void
+ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *info =
+			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	uint32_t fdirctrl, max_num;
+	uint8_t offset;
+
+	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
+			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
+
+	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
+		fdir_info->guarant_spc = max_num;
+	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
+		fdir_info->guarant_spc = max_num * 4;
+
+	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
+	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
+	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
+	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
+			fdir_info->mask.ipv6_mask.src_ip);
+	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
+			fdir_info->mask.ipv6_mask.dst_ip);
+	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
+	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
+	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+	fdir_info->flex_payload_unit = sizeof(uint16_t);
+	fdir_info->max_flex_payload_segment_num = 1;
+	fdir_info->flex_payload_limit = 62;
+	fdir_info->flex_conf.nb_payloads = 1;
+	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
+	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
+	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
+	fdir_info->flex_conf.nb_flexmasks = 1;
+	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
+	fdir_info->flex_conf.flex_mask[0].mask[0] =
+			(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
+	fdir_info->flex_conf.flex_mask[0].mask[1] =
+			(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
+}
+
+static void
+ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw_fdir_info *info =
+			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	uint32_t reg, max_num;
+
+	/* Get the information from registers */
+	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
+	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
+					IXGBE_FDIRFREE_COLL_SHIFT);
+	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
+				   IXGBE_FDIRFREE_FREE_SHIFT);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
+				      IXGBE_FDIRLEN_MAXHASH_SHIFT);
+	info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
+				     IXGBE_FDIRLEN_MAXLEN_SHIFT);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+		IXGBE_FDIRUSTAT_REMOVE_SHIFT;
+	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
+		IXGBE_FDIRUSTAT_ADD_SHIFT;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
+	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
+		IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
+	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
+		IXGBE_FDIRFSTAT_FADD_SHIFT;
+
+	/* Copy the new information into the fdir parameter */
+	fdir_stats->collision = info->collision;
+	fdir_stats->free = info->free;
+	fdir_stats->maxhash = info->maxhash;
+	fdir_stats->maxlen = info->maxlen;
+	fdir_stats->remove = info->remove;
+	fdir_stats->add = info->add;
+	fdir_stats->f_remove = info->f_remove;
+	fdir_stats->f_add = info->f_add;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
+			(reg & FDIRCTRL_PBALLOC_MASK)));
+	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+	else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
+		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
+
+}
+
+/*
+ * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
+ * @dev: pointer to the structure rte_eth_dev
+ * @filter_op:operation will be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op, void *arg)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret = 0;
+
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+		hw->mac.type != ixgbe_mac_X540 &&
+		hw->mac.type != ixgbe_mac_X550 &&
+		hw->mac.type != ixgbe_mac_X550EM_x)
+		return -ENOTSUP;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+		return -EINVAL;
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = ixgbe_add_del_fdir_filter(dev,
+			(struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
+		break;
+	case RTE_ETH_FILTER_UPDATE:
+		ret = ixgbe_add_del_fdir_filter(dev,
+			(struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = ixgbe_add_del_fdir_filter(dev,
+			(struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
+		break;
+	case RTE_ETH_FILTER_FLUSH:
+		ret = ixgbe_fdir_flush(dev);
+		break;
+	case RTE_ETH_FILTER_INFO:
+		ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+		break;
+	case RTE_ETH_FILTER_STATS:
+		ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
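+
+/*
+ * Illustrative usage sketch, not part of this patch: applications normally
+ * reach ixgbe_fdir_ctrl_func() through the generic filter API,
+ * rte_eth_dev_filter_ctrl(). The field values below are examples only and
+ * the function is guarded out so it is never built into the driver.
+ */
+#ifdef IXGBE_FDIR_USAGE_EXAMPLE
+static int
+example_add_fdir_filter(uint8_t port_id, uint16_t rx_queue)
+{
+	struct rte_eth_fdir_filter filter;
+
+	memset(&filter, 0, sizeof(filter));
+	filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+	filter.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000);
+	filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
+	filter.action.rx_queue = rx_queue;
+
+	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+				       RTE_ETH_FILTER_ADD, &filter);
+}
+#endif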
diff --git a/drivers/net/ixgbe/ixgbe_logs.h b/drivers/net/ixgbe/ixgbe_logs.h
new file mode 100644
index 0000000..572e030
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_logs.h
@@ -0,0 +1,78 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_LOGS_H_
+#define _IXGBE_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _IXGBE_LOGS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
new file mode 100644
index 0000000..caed137
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -0,0 +1,629 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+#define IXGBE_MAX_VFTA     (128)
+#define IXGBE_VF_MSG_SIZE_DEFAULT 1
+#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+	return eth_dev->pci_dev->max_vfs;
+}
+
+static inline
+int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	uint16_t vfn;
+
+	for (vfn = 0; vfn < vf_num; vfn++) {
+		eth_random_addr(vf_mac_addr);
+		/* keep the random address as default */
+		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+			   ETHER_ADDR_LEN);
+	}
+
+	return 0;
+}
+
+static inline int
+ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_MAILBOX;
+
+	return 0;
+}
+
+void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_vf_info **vfinfo =
+		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+	struct ixgbe_mirror_info *mirror_info =
+		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+	struct ixgbe_uta_info *uta_info =
+		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	uint16_t vf_num;
+	uint8_t nb_queue;
+
+	PMD_INIT_FUNC_TRACE();
+
+	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+	if (0 == (vf_num = dev_num_vf(eth_dev)))
+		return;
+
+	*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
+	if (*vfinfo == NULL)
+		rte_panic("Cannot allocate memory for private VF data\n");
+
+	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
+	hw->mac.mc_filter_type = 0;
+
+	if (vf_num >= ETH_32_POOLS) {
+		nb_queue = 2;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
+	} else if (vf_num >= ETH_16_POOLS) {
+		nb_queue = 4;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+	} else {
+		nb_queue = 8;
+		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+	}
+
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
+
+	/* init_mailbox_params */
+	hw->mbx.ops.init_params(hw);
+
+	/* set mb interrupt mask */
+	ixgbe_mb_intr_setup(eth_dev);
+
+	return;
+}
+
+int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+	uint32_t vtctl, fcrth;
+	uint32_t vfre_slot, vfre_offset;
+	uint16_t vf_num;
+	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
+	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	uint32_t gpie, gcr_ext;
+	uint32_t vlanctrl;
+	int i;
+
+	if (0 == (vf_num = dev_num_vf(eth_dev)))
+		return -1;
+
+	/* enable VMDq and set the default pool for PF */
+	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
+	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+		<< IXGBE_VT_CTL_POOL_SHIFT;
+	vtctl |= IXGBE_VT_CTL_REPLEN;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
+
+	vfre_offset = vf_num & VFRE_MASK;
+	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+	/* Enable pools reserved to PF only */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
+
+	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
+	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+	/* clear VMDq map to permanent rar 0 */
+	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
+	/* clear VMDq map to scan rar 127 */
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);
+
+	/* set VMDq map to default PF pool */
+	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+
+	/*
+	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
+	 */
+	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
+
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+	gpie |= IXGBE_GPIE_MSIX_MODE;
+
+	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+	case ETH_64_POOLS:
+		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+		gpie |= IXGBE_GPIE_VTMODE_64;
+		break;
+	case ETH_32_POOLS:
+		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
+		gpie |= IXGBE_GPIE_VTMODE_32;
+		break;
+	case ETH_16_POOLS:
+		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
+		gpie |= IXGBE_GPIE_VTMODE_16;
+		break;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/*
+	 * enable vlan filtering and allow all vlan tags through
+	 */
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IXGBE_MAX_VFTA; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+
+	/* Enable MAC Anti-Spoofing */
+	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
+
+	/* set flow control threshold to max to avoid tx switch hang */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
+	}
+
+	return 0;
+}
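+
+/*
+ * Illustrative sketch, not part of this patch: VFRE/VFTE each consist of two
+ * 32-bit registers, so a pool index is split into a register slot
+ * (index / 32) and a bit offset (index % 32); pool 37, for instance, lands
+ * in slot 1, bit 5. The helper name below is hypothetical.
+ */
+static inline void
+ixgbe_example_vfre_position(uint16_t pool, uint32_t *slot, uint32_t *offset)
+{
+	*slot = pool >> 5;	/* which VFRE/VFTE register */
+	*offset = pool & 0x1F;	/* which bit within that register */
+}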
+
+static void
+set_rx_mode(struct rte_eth_dev *dev)
+{
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+	uint16_t vfn = dev_num_vf(dev);
+
+	/* Check for Promiscuous and All Multicast modes */
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+	/* set all bits that we expect to always be set */
+	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
+	fctrl |= IXGBE_FCTRL_BAM;
+
+	/* clear the bits we are changing the status of */
+	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+	if (dev_data->promiscuous) {
+		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+	} else {
+		if (dev_data->all_multicast) {
+			fctrl |= IXGBE_FCTRL_MPE;
+			vmolr |= IXGBE_VMOLR_MPE;
+		} else {
+			vmolr |= IXGBE_VMOLR_ROMPE;
+		}
+	}
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
+			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+			   IXGBE_VMOLR_ROPE);
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		ixgbe_vlan_hw_strip_enable_all(dev);
+	else
+		ixgbe_vlan_hw_strip_disable_all(dev);
+}
+
+static inline void
+ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
+			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+
+	/* reset multicast table array for vf */
+	vfinfo[vf].num_vf_mc_hashes = 0;
+
+	/* reset rx mode */
+	set_rx_mode(dev);
+
+	hw->mac.ops.clear_rar(hw, rar_entry);
+}
+
+static inline void
+ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+	uint32_t reg_offset, vf_shift;
+	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
+	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+
+	vf_shift = vf & VFRE_MASK;
+	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+	/* enable transmit and receive for vf */
+	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+	reg |= (1 << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+	reg |= (1 << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+	/* Enable counting of spoofed packets in the SSVPC register */
+	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+	reg |= (1 << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
+	ixgbe_vf_reset_event(dev, vf);
+}
+
+static int
+ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+	ixgbe_vf_reset_msg(dev, vf);
+
+	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
+
+	/* reply to reset with ack and vf mac address */
+	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+	/*
+	 * Piggyback the multicast filter type so VF can compute the
+	 * correct vectors
+	 */
+	msgbuf[3] = hw->mac.mc_filter_type;
+	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+	return 0;
+}
+
+static int
+ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+	if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
+	}
+	return -1;
+}
+
+static int
+ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+		IXGBE_VT_MSGINFO_SHIFT;
+	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+	uint32_t mta_idx;
+	uint32_t mta_shift;
+	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
+	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
+	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
+	uint32_t reg_val;
+	int i;
+
+	/* only so many hash values supported */
+	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+	/* store the mc entries  */
+	vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
+	for (i = 0; i < nb_entries; i++) {
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
+	}
+
+	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
+				& IXGBE_MTA_INDEX_MASK;
+		mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
+		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
+		reg_val |= (1 << mta_shift);
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
+	}
+
+	return 0;
+}
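+
+/*
+ * Illustrative sketch, not part of this patch: a 12-bit multicast hash is
+ * split into an MTA register index (bits 11:5) and a bit position
+ * (bits 4:0) as done above; hash 0x0B5, for instance, sets bit 21 of MTA(5).
+ * The helper name below is hypothetical.
+ */
+static inline void
+ixgbe_example_mta_position(uint16_t mc_hash, uint32_t *idx, uint32_t *bit)
+{
+	*idx = (mc_hash >> 5) & 0x7F;	/* MTA register index */
+	*bit = mc_hash & 0x1F;		/* bit within the register */
+}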
+
+static int
+ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	int add, vid;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+		>> IXGBE_VT_MSGINFO_SHIFT;
+	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+
+	if (add)
+		vfinfo[vf].vlan_count++;
+	else if (vfinfo[vf].vlan_count)
+		vfinfo[vf].vlan_count--;
+	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
+}
+
+static int
+ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t new_mtu = msgbuf[1];
+	uint32_t max_frs;
+	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	/* X540 and X550 support jumbo frames in IOV mode */
+	if (hw->mac.type != ixgbe_mac_X540 &&
+		hw->mac.type != ixgbe_mac_X550 &&
+		hw->mac.type != ixgbe_mac_X550EM_x)
+		return -1;
+
+	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+		return -1;
+
+	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+	if (max_frs < new_mtu) {
+		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	uint32_t api_version = msgbuf[1];
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+	switch (api_version) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		vfinfo[vf].api_version = (uint8_t)api_version;
+		return 0;
+	default:
+		break;
+	}
+
+	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+		api_version, vf);
+
+	return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	/* Verify if the PF supports the mbox APIs version or not */
+	switch (vfinfo[vf].api_version) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* Notify VF of Rx and Tx queue number */
+	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	/* Notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
+
+	/*
+	 * FIXME: fill msgbuf[IXGBE_VF_TRANS_VLAN] if needed,
+	 * e.g. for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
+	 */
+
+	return 0;
+}
+
+static int
+ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
+	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
+	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
+	int32_t retval;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+	if (retval) {
+		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+		return retval;
+	}
+
+	/* do nothing if the message has already been processed */
+	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+		return retval;
+
+	/* flush the ack before we write any messages back */
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* perform VF reset */
+	if (msgbuf[0] == IXGBE_VF_RESET) {
+		int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+		vfinfo[vf].clear_to_send = true;
+		return ret;
+	}
+
+	/* check & process VF to PF mailbox message */
+	switch ((msgbuf[0] & 0xFFFF)) {
+	case IXGBE_VF_SET_MAC_ADDR:
+		retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_SET_MULTICAST:
+		retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_SET_LPE:
+		retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_SET_VLAN:
+		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
+		break;
+	default:
+		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
+		retval = IXGBE_ERR_MBX;
+		break;
+	}
+
+	/* respond to the VF according to the message processing result */
+	if (retval)
+		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+	else
+		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
+
+	return retval;
+}
+
+static inline void
+ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
+{
+	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+	if (!vfinfo[vf].clear_to_send)
+		ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+	uint16_t vf;
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+		/* check & process vf function level reset */
+		if (!ixgbe_check_for_rst(hw, vf))
+			ixgbe_vf_reset_event(eth_dev, vf);
+
+		/* check & process vf mailbox messages */
+		if (!ixgbe_check_for_msg(hw, vf))
+			ixgbe_rcv_msg_from_vf(eth_dev, vf);
+
+		/* check & process acks from vf */
+		if (!ixgbe_check_for_ack(hw, vf))
+			ixgbe_rcv_ack_from_vf(eth_dev, vf);
+	}
+}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
new file mode 100644
index 0000000..4f9ab22
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -0,0 +1,4780 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright 2014 6WIND S.A.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ip.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "base/ixgbe_dcb.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_rxtx.h"
+
+/* Bit mask indicating which bits are required to build the TX context descriptor */
+#define IXGBE_TX_OFFLOAD_MASK (			 \
+		PKT_TX_VLAN_PKT |		 \
+		PKT_TX_IP_CKSUM |		 \
+		PKT_TX_L4_MASK |		 \
+		PKT_TX_TCP_SEG)
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check_raw(m, 0);
+	return (m);
+}
+
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
+#else
+#define rte_ixgbe_prefetch(p)   do {} while(0)
+#endif
+
+/*********************************************************************
+ *
+ *  TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+	struct ixgbe_tx_entry *txep;
+	uint32_t status;
+	int i;
+
+	/* check DD bit on threshold descriptor */
+	status = txq->tx_ring[txq->tx_next_dd].wb.status;
+	if (! (status & IXGBE_ADVTXD_STAT_DD))
+		return 0;
+
+	/*
+	 * first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1)
+	 */
+	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
+
+	/* free buffers one at a time */
+	if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
+		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+			txep->mbuf->next = NULL;
+			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
+			txep->mbuf = NULL;
+		}
+	} else {
+		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+			rte_pktmbuf_free_seg(txep->mbuf);
+			txep->mbuf = NULL;
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
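+
+/*
+ * Illustrative example (not part of the driver logic): with nb_tx_desc = 512
+ * and tx_rs_thresh = 32, if tx_next_dd = 31 and that descriptor reports DD,
+ * the 32 mbufs at sw_ring[0..31] are freed, nb_tx_free grows by 32 and
+ * tx_next_dd advances to 63; when advancing would pass the end of the ring,
+ * tx_next_dd wraps back to 31.
+ */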
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+	uint64_t buf_dma_addr;
+	uint32_t pkt_len;
+	int i;
+
+	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+		pkt_len = (*pkts)->data_len;
+
+		/* write data to descriptor */
+		txdp->read.buffer_addr = buf_dma_addr;
+		txdp->read.cmd_type_len =
+				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+		txdp->read.olinfo_status =
+				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_prefetch0(&(*pkts)->pool);
+	}
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+	uint64_t buf_dma_addr;
+	uint32_t pkt_len;
+
+	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+	pkt_len = (*pkts)->data_len;
+
+	/* write data to descriptor */
+	txdp->read.buffer_addr = buf_dma_addr;
+	txdp->read.cmd_type_len =
+			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+	txdp->read.olinfo_status =
+			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
+		      uint16_t nb_pkts)
+{
+	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+	struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+	const int N_PER_LOOP = 4;
+	const int N_PER_LOOP_MASK = N_PER_LOOP-1;
+	int mainpart, leftover;
+	int i, j;
+
+	/*
+	 * Process most of the packets in chunks of N pkts.  Any
+	 * leftover packets will get processed one at a time.
+	 */
+	mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+	leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
+	for (i = 0; i < mainpart; i += N_PER_LOOP) {
+		/* Copy N mbuf pointers to the S/W ring */
+		for (j = 0; j < N_PER_LOOP; ++j) {
+			(txep + i + j)->mbuf = *(pkts + i + j);
+		}
+		tx4(txdp + i, pkts + i);
+	}
+
+	if (unlikely(leftover > 0)) {
+		for (i = 0; i < leftover; ++i) {
+			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+			tx1(txdp + mainpart + i, pkts + mainpart + i);
+		}
+	}
+}
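+
+/*
+ * Illustrative example (not part of the driver logic): for nb_pkts = 18,
+ * mainpart = 16 and leftover = 2, so four tx4() calls fill descriptors 0-15
+ * and two tx1() calls fill descriptors 16-17.
+ */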
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+	     uint16_t nb_pkts)
+{
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
+	uint16_t n = 0;
+
+	/*
+	 * Begin scanning the H/W ring for done descriptors when the
+	 * number of available descriptors drops below tx_free_thresh.  For
+	 * each done descriptor, free the associated buffer.
+	 */
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ixgbe_tx_free_bufs(txq);
+
+	/* Only use descriptors that are available */
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	/* Use exactly nb_pkts descriptors */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	/*
+	 * At this point, we know there are enough descriptors in the
+	 * ring to transmit all the packets.  This assumes that each
+	 * mbuf contains a single segment, and that no new offloads
+	 * are expected, which would require a new context descriptor.
+	 */
+
+	/*
+	 * See if we're going to wrap-around. If so, handle the top
+	 * of the descriptor ring first, then do the bottom.  If not,
+	 * the processing looks just like the "bottom" part anyway...
+	 */
+	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+		ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+
+		/*
+		 * We know that the last descriptor in the ring will need to
+		 * have its RS bit set because tx_rs_thresh has to be
+		 * a divisor of the ring size
+		 */
+		tx_r[txq->tx_next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		txq->tx_tail = 0;
+	}
+
+	/* Fill H/W descriptor ring with mbuf data */
+	ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+	/*
+	 * Determine if RS bit should be set
+	 * This is what we actually want:
+	 *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
+	 * but instead of subtracting 1 and doing >=, we can just do
+	 * greater than without subtracting.
+	 */
+	if (txq->tx_tail > txq->tx_next_rs) {
+		tx_r[txq->tx_next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+						txq->tx_rs_thresh);
+		if (txq->tx_next_rs >= txq->nb_tx_desc)
+			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+	}
+
+	/*
+	 * Check for wrap-around. This would only happen if we used
+	 * up to the last descriptor in the ring, no more, no less.
+	 */
+	if (txq->tx_tail >= txq->nb_tx_desc)
+		txq->tx_tail = 0;
+
+	/* update tail pointer */
+	rte_wmb();
+	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+	return nb_pkts;
+}
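+
+/*
+ * Illustrative example (not part of the driver logic): with nb_tx_desc = 512,
+ * tx_tail = 508 and nb_pkts = 8, the first fill writes n = 4 descriptors
+ * (508-511), the RS bit is set on the descriptor at tx_next_rs, tx_next_rs is
+ * rewound to tx_rs_thresh - 1, tx_tail wraps to 0 and the remaining 4 packets
+ * go into descriptors 0-3.
+ */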
+
+uint16_t
+ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+
+	/* Transmit directly if the burst fits within TX_MAX_BURST pkts */
+	if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
+		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
+	nb_tx = 0;
+	while (nb_pkts) {
+		uint16_t ret, n;
+		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
+		ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+		nb_tx = (uint16_t)(nb_tx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_tx;
+}
+
+static inline void
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
+		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
+		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+{
+	uint32_t type_tucmd_mlhl;
+	uint32_t mss_l4len_idx = 0;
+	uint32_t ctx_idx;
+	uint32_t vlan_macip_lens;
+	union ixgbe_tx_offload tx_offload_mask;
+
+	ctx_idx = txq->ctx_curr;
+	tx_offload_mask.data = 0;
+	type_tucmd_mlhl = 0;
+
+	/* Specify which HW CTX to upload. */
+	mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
+	if (ol_flags & PKT_TX_VLAN_PKT) {
+		tx_offload_mask.vlan_tci |= ~0;
+	}
+
+	/* check if TCP segmentation required for this packet */
+	if (ol_flags & PKT_TX_TCP_SEG) {
+		/* implies IP cksum and TCP cksum */
+		type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+			IXGBE_ADVTXD_TUCMD_L4T_TCP |
+			IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+
+		tx_offload_mask.l2_len |= ~0;
+		tx_offload_mask.l3_len |= ~0;
+		tx_offload_mask.l4_len |= ~0;
+		tx_offload_mask.tso_segsz |= ~0;
+		mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
+		mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	} else { /* no TSO, check if hardware checksum is needed */
+		if (ol_flags & PKT_TX_IP_CKSUM) {
+			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+		}
+
+		switch (ol_flags & PKT_TX_L4_MASK) {
+		case PKT_TX_UDP_CKSUM:
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+			mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case PKT_TX_TCP_CKSUM:
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+			mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			tx_offload_mask.l4_len |= ~0;
+			break;
+		case PKT_TX_SCTP_CKSUM:
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+			mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		default:
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+			break;
+		}
+	}
+
+	txq->ctx_cache[ctx_idx].flags = ol_flags;
+	txq->ctx_cache[ctx_idx].tx_offload.data  =
+		tx_offload_mask.data & tx_offload.data;
+	txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
+
+	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+	vlan_macip_lens = tx_offload.l3_len;
+	vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
+	vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
+	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+	ctx_txd->seqnum_seed     = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+		union ixgbe_tx_offload tx_offload)
+{
+	/* If match with the current used context */
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+			return txq->ctx_curr;
+	}
+
+	/* Check whether it matches the other cached context */
+	txq->ctx_curr ^= 1;
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+			return txq->ctx_curr;
+	}
+
+	/* No match: a new context descriptor must be created */
+	return (IXGBE_CTX_NUM);
+}
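+
+/*
+ * Illustrative example (not part of the driver logic): the queue caches
+ * IXGBE_CTX_NUM (2) context entries. If packet A needs VLAN insertion and
+ * packet B needs TCP checksum offload, A and B each occupy one cache slot and
+ * alternating A/B traffic keeps reusing them; a third, distinct offload
+ * combination makes this function return IXGBE_CTX_NUM, forcing a new
+ * context descriptor to be written.
+ */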
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+	uint32_t tmp = 0;
+	if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+	if (ol_flags & PKT_TX_IP_CKSUM)
+		tmp |= IXGBE_ADVTXD_POPTS_IXSM;
+	if (ol_flags & PKT_TX_TCP_SEG)
+		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+	return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+	uint32_t cmdtype = 0;
+	if (ol_flags & PKT_TX_VLAN_PKT)
+		cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
+	if (ol_flags & PKT_TX_TCP_SEG)
+		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+	return cmdtype;
+}
+
+/* Default RS bit threshold values */
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH   32
+#endif
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
+{
+	struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
+	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+	uint16_t nb_tx_desc = txq->nb_tx_desc;
+	uint16_t desc_to_clean_to;
+	uint16_t nb_tx_to_clean;
+
+	/* Determine the last descriptor needing to be cleaned */
+	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
+	if (desc_to_clean_to >= nb_tx_desc)
+		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+	/* Check to make sure the last descriptor to clean is done */
+	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+	{
+		PMD_TX_FREE_LOG(DEBUG,
+				"TX descriptor %4u is not done "
+				"(port=%d queue=%d)",
+				desc_to_clean_to,
+				txq->port_id, txq->queue_id);
+		/* Failed to clean any descriptors, better luck next time */
+		return -(1);
+	}
+
+	/* Figure out how many descriptors will be cleaned */
+	if (last_desc_cleaned > desc_to_clean_to)
+		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+							desc_to_clean_to);
+	else
+		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+						last_desc_cleaned);
+
+	PMD_TX_FREE_LOG(DEBUG,
+			"Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)",
+			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+			txq->port_id, txq->queue_id);
+
+	/*
+	 * The last descriptor to clean is done, so that means all the
+	 * descriptors from the last descriptor that was cleaned
+	 * up to the last descriptor with the RS bit set
+	 * are done. Only reset the threshold descriptor.
+	 */
+	txr[desc_to_clean_to].wb.status = 0;
+
+	/* Update the txq to reflect the last descriptor that was cleaned */
+	txq->last_desc_cleaned = desc_to_clean_to;
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+	/* No Error */
+	return (0);
+}
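+
+/*
+ * Illustrative example (not part of the driver logic, assuming single-segment
+ * packets so last_id equals the descriptor index): with nb_tx_desc = 512,
+ * tx_rs_thresh = 32 and last_desc_cleaned = 500, desc_to_clean_to is first
+ * 532 and then wraps to 20; if that descriptor reports DD, the 32 descriptors
+ * 501..511 and 0..20 are considered done and nb_tx_free grows by 32.
+ */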
+
+uint16_t
+ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_tx_entry *sw_ring;
+	struct ixgbe_tx_entry *txe, *txn;
+	volatile union ixgbe_adv_tx_desc *txr;
+	volatile union ixgbe_adv_tx_desc *txd;
+	struct rte_mbuf     *tx_pkt;
+	struct rte_mbuf     *m_seg;
+	uint64_t buf_dma_addr;
+	uint32_t olinfo_status;
+	uint32_t cmd_type_len;
+	uint32_t pkt_len;
+	uint16_t slen;
+	uint64_t ol_flags;
+	uint16_t tx_id;
+	uint16_t tx_last;
+	uint16_t nb_tx;
+	uint16_t nb_used;
+	uint64_t tx_ol_req;
+	uint32_t ctx = 0;
+	uint32_t new_ctx;
+	union ixgbe_tx_offload tx_offload = {0};
+
+	txq = tx_queue;
+	sw_ring = txq->sw_ring;
+	txr     = txq->tx_ring;
+	tx_id   = txq->tx_tail;
+	txe = &sw_ring[tx_id];
+
+	/* Determine if the descriptor ring needs to be cleaned. */
+	if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+		ixgbe_xmit_cleanup(txq);
+	}
+
+	rte_prefetch0(&txe->mbuf->pool);
+
+	/* TX loop */
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		new_ctx = 0;
+		tx_pkt = *tx_pkts++;
+		pkt_len = tx_pkt->pkt_len;
+
+		/*
+		 * Determine how many (if any) context descriptors
+		 * are needed for offload functionality.
+		 */
+		ol_flags = tx_pkt->ol_flags;
+
+		/* If hardware offload required */
+		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
+		if (tx_ol_req) {
+			tx_offload.l2_len = tx_pkt->l2_len;
+			tx_offload.l3_len = tx_pkt->l3_len;
+			tx_offload.l4_len = tx_pkt->l4_len;
+			tx_offload.vlan_tci = tx_pkt->vlan_tci;
+			tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+			/* Decide whether a new context descriptor is needed or an existing one can be reused. */
+			ctx = what_advctx_update(txq, tx_ol_req,
+				tx_offload);
+			/* Only allocate context descriptor if required*/
+			new_ctx = (ctx == IXGBE_CTX_NUM);
+			ctx = txq->ctx_curr;
+		}
+
+		/*
+		 * Keep track of how many descriptors are used this loop
+		 * This will always be the number of segments + the number of
+		 * Context descriptors required to transmit the packet
+		 */
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+		/*
+		 * The number of descriptors that must be allocated for a
+		 * packet is the number of segments of that packet, plus 1
+		 * Context Descriptor for the hardware offload, if any.
+		 * Determine the last TX descriptor to allocate in the TX ring
+		 * for the packet, starting from the current position (tx_id)
+		 * in the ring.
+		 */
+		tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+		/* Circular ring */
+		if (tx_last >= txq->nb_tx_desc)
+			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+			   " tx_first=%u tx_last=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
+
+		/*
+		 * Make sure there are enough TX descriptors available to
+		 * transmit the entire packet.
+		 * nb_used better be less than or equal to txq->tx_rs_thresh
+		 */
+		if (nb_used > txq->nb_tx_free) {
+			PMD_TX_FREE_LOG(DEBUG,
+					"Not enough free TX descriptors "
+					"nb_used=%4u nb_free=%4u "
+					"(port=%d queue=%d)",
+					nb_used, txq->nb_tx_free,
+					txq->port_id, txq->queue_id);
+
+			if (ixgbe_xmit_cleanup(txq) != 0) {
+				/* Could not clean any descriptors */
+				if (nb_tx == 0)
+					return (0);
+				goto end_of_tx;
+			}
+
+			/* nb_used better be <= txq->tx_rs_thresh */
+			if (unlikely(nb_used > txq->tx_rs_thresh)) {
+				PMD_TX_FREE_LOG(DEBUG,
+					"The number of descriptors needed to "
+					"transmit the packet exceeds the "
+					"RS bit threshold. This will impact "
+					"performance. "
+					"nb_used=%4u nb_free=%4u "
+					"tx_rs_thresh=%4u. "
+					"(port=%d queue=%d)",
+					nb_used, txq->nb_tx_free,
+					txq->tx_rs_thresh,
+					txq->port_id, txq->queue_id);
+				/*
+				 * Loop here until there are enough TX
+				 * descriptors or until the ring cannot be
+				 * cleaned.
+				 */
+				while (nb_used > txq->nb_tx_free) {
+					if (ixgbe_xmit_cleanup(txq) != 0) {
+						/*
+						 * Could not clean any
+						 * descriptors
+						 */
+						if (nb_tx == 0)
+							return (0);
+						goto end_of_tx;
+					}
+				}
+			}
+		}
+
+		/*
+		 * By now there are enough free TX descriptors to transmit
+		 * the packet.
+		 */
+
+		/*
+		 * Set common flags of all TX Data Descriptors.
+		 *
+		 * The following bits must be set in all Data Descriptors:
+		 *   - IXGBE_ADVTXD_DTYP_DATA
+		 *   - IXGBE_ADVTXD_DCMD_DEXT
+		 *
+		 * The following bits must be set in the first Data Descriptor
+		 * and are ignored in the other ones:
+		 *   - IXGBE_ADVTXD_DCMD_IFCS
+		 *   - IXGBE_ADVTXD_MAC_1588
+		 *   - IXGBE_ADVTXD_DCMD_VLE
+		 *
+		 * The following bits must only be set in the last Data
+		 * Descriptor:
+		 *   - IXGBE_TXD_CMD_EOP
+		 *
+		 * The following bits can be set in any Data Descriptor, but
+		 * are only set in the last Data Descriptor:
+		 *   - IXGBE_TXD_CMD_RS
+		 */
+		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
+			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+#ifdef RTE_LIBRTE_IEEE1588
+		if (ol_flags & PKT_TX_IEEE1588_TMST)
+			cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
+#endif
+
+		olinfo_status = 0;
+		if (tx_ol_req) {
+
+			if (ol_flags & PKT_TX_TCP_SEG) {
+				/* when TSO is on, the paylen in the descriptor
+				 * is not the packet len but the TCP payload len */
+				pkt_len -= (tx_offload.l2_len +
+					tx_offload.l3_len + tx_offload.l4_len);
+			}
+
+			/*
+			 * Setup the TX Advanced Context Descriptor if required
+			 */
+			if (new_ctx) {
+				volatile struct ixgbe_adv_tx_context_desc *
+				    ctx_txd;
+
+				ctx_txd = (volatile struct
+				    ixgbe_adv_tx_context_desc *)
+				    &txr[tx_id];
+
+				txn = &sw_ring[txe->next_id];
+				rte_prefetch0(&txn->mbuf->pool);
+
+				if (txe->mbuf != NULL) {
+					rte_pktmbuf_free_seg(txe->mbuf);
+					txe->mbuf = NULL;
+				}
+
+				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+					tx_offload);
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+			}
+
+			/*
+			 * Set up the TX Advanced Data Descriptor.
+			 * This path is taken whether the context
+			 * descriptor is newly written or reused.
+			 */
+			cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+			olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
+		}
+
+		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+		m_seg = tx_pkt;
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			rte_prefetch0(&txn->mbuf->pool);
+
+			if (txe->mbuf != NULL)
+				rte_pktmbuf_free_seg(txe->mbuf);
+			txe->mbuf = m_seg;
+
+			/*
+			 * Set up Transmit Data Descriptor.
+			 */
+			slen = m_seg->data_len;
+			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+			txd->read.buffer_addr =
+				rte_cpu_to_le_64(buf_dma_addr);
+			txd->read.cmd_type_len =
+				rte_cpu_to_le_32(cmd_type_len | slen);
+			txd->read.olinfo_status =
+				rte_cpu_to_le_32(olinfo_status);
+			txe->last_id = tx_last;
+			tx_id = txe->next_id;
+			txe = txn;
+			m_seg = m_seg->next;
+		} while (m_seg != NULL);
+
+		/*
+		 * The last packet data descriptor needs End Of Packet (EOP)
+		 */
+		cmd_type_len |= IXGBE_TXD_CMD_EOP;
+		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
+		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+		/* Set RS bit only on threshold packets' last descriptor */
+		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+			PMD_TX_FREE_LOG(DEBUG,
+					"Setting RS bit on TXD id="
+					"%4u (port=%d queue=%d)",
+					tx_last, txq->port_id, txq->queue_id);
+
+			cmd_type_len |= IXGBE_TXD_CMD_RS;
+
+			/* Update txq RS bit counters */
+			txq->nb_tx_used = 0;
+		}
+		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+	}
+end_of_tx:
+	rte_wmb();
+
+	/*
+	 * Set the Transmit Descriptor Tail (TDT)
+	 */
+	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
+		   (unsigned) tx_id, (unsigned) nb_tx);
+	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+	txq->tx_tail = tx_id;
+
+	return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+static inline uint64_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+	uint64_t pkt_flags;
+
+	static const uint64_t ip_pkt_types_map[16] = {
+		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+		PKT_RX_IPV6_HDR, 0, 0, 0,
+		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+	};
+
+	static const uint64_t ip_rss_types_map[16] = {
+		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+		PKT_RX_RSS_HASH, 0, 0, 0,
+		0, 0, 0,  PKT_RX_FDIR,
+	};
+
+#ifdef RTE_LIBRTE_IEEE1588
+	static uint64_t ip_pkt_etqf_map[8] = {
+		0, 0, 0, PKT_RX_IEEE1588_PTP,
+		0, 0, 0, 0,
+	};
+
+	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
+			ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#else
+	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
+			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+
+#endif
+	return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+	uint64_t pkt_flags;
+
+	/*
+	 * Check only whether a VLAN is present.
+	 * Do not check whether the L3/L4 RX checksum was computed by the NIC;
+	 * that is indicated by the rte_eth_rxmode.hw_ip_checksum flag.
+	 */
+	pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+	if (rx_status & IXGBE_RXD_STAT_TMST)
+		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+	return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+	/*
+	 * Bit 31: IPE, IPv4 checksum error
+	 * Bit 30: L4I, L4 integrity error
+	 */
+	static uint64_t error_to_pkt_flags_map[4] = {
+		0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+	};
+	return error_to_pkt_flags_map[(rx_status >>
+		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+}
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
+{
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_entry *rxep;
+	struct rte_mbuf *mb;
+	uint16_t pkt_len;
+	uint64_t pkt_flags;
+	int s[LOOK_AHEAD], nb_dd;
+	int i, j, nb_rx = 0;
+
+
+	/* get references to current descriptor and S/W ring entry */
+	rxdp = &rxq->rx_ring[rxq->rx_tail];
+	rxep = &rxq->sw_ring[rxq->rx_tail];
+
+	/* check to make sure there is at least 1 packet to receive */
+	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+		return 0;
+
+	/*
+	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+	 * reference packets that are ready to be received.
+	 */
+	for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
+	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
+	{
+		/* Read desc statuses backwards to avoid race condition */
+		for (j = LOOK_AHEAD-1; j >= 0; --j)
+			s[j] = rxdp[j].wb.upper.status_error;
+
+		/* Compute how many status bits were set */
+		nb_dd = 0;
+		for (j = 0; j < LOOK_AHEAD; ++j)
+			nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+
+		nb_rx += nb_dd;
+
+		/* Translate descriptor info to mbuf format */
+		for (j = 0; j < nb_dd; ++j) {
+			mb = rxep[j].mbuf;
+			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+			mb->data_len = pkt_len;
+			mb->pkt_len = pkt_len;
+			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
+
+			/* convert descriptor fields to rte mbuf flags */
+			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
+					rxdp[j].wb.lower.lo_dword.data);
+			/* reuse status field from scan list */
+			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			mb->ol_flags = pkt_flags;
+
+			if (likely(pkt_flags & PKT_RX_RSS_HASH))
+				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+			else if (pkt_flags & PKT_RX_FDIR) {
+				mb->hash.fdir.hash =
+					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
+						& IXGBE_ATR_HASH_MASK);
+				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+			}
+		}
+
+		/* Move mbuf pointers from the S/W ring to the stage */
+		for (j = 0; j < LOOK_AHEAD; ++j) {
+			rxq->rx_stage[i + j] = rxep[j].mbuf;
+		}
+
+		/* stop if all requested packets could not be received */
+		if (nb_dd != LOOK_AHEAD)
+			break;
+	}
+
+	/* clear software ring entries so we can cleanup correctly */
+	for (i = 0; i < nb_rx; ++i) {
+		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+	}
+
+
+	return nb_rx;
+}
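+
+/*
+ * Illustrative example (not part of the driver logic): with LOOK_AHEAD = 8,
+ * each pass reads 8 status words; since descriptors complete in order, if
+ * only the first 5 have DD set then nb_dd = 5, those 5 mbufs are staged and
+ * the scan stops, returning nb_rx = 5.
+ */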
+
+static inline int
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
+{
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_entry *rxep;
+	struct rte_mbuf *mb;
+	uint16_t alloc_idx;
+	__le64 dma_addr;
+	int diag, i;
+
+	/* allocate buffers in bulk directly into the S/W ring */
+	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+	rxep = &rxq->sw_ring[alloc_idx];
+	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+				    rxq->rx_free_thresh);
+	if (unlikely(diag != 0))
+		return (-ENOMEM);
+
+	rxdp = &rxq->rx_ring[alloc_idx];
+	for (i = 0; i < rxq->rx_free_thresh; ++i) {
+		/* populate the static rte mbuf fields */
+		mb = rxep[i].mbuf;
+		if (reset_mbuf) {
+			mb->next = NULL;
+			mb->nb_segs = 1;
+			mb->port = rxq->port_id;
+		}
+
+		rte_mbuf_refcnt_set(mb, 1);
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+		/* populate the descriptors */
+		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+		rxdp[i].read.hdr_addr = dma_addr;
+		rxdp[i].read.pkt_addr = dma_addr;
+	}
+
+	/* update state of internal queue structure */
+	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
+	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+	/* no errors */
+	return 0;
+}
+
+static inline uint16_t
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+	int i;
+
+	/* how many packets are ready to return? */
+	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+	/* copy mbuf pointers to the application's packet list */
+	for (i = 0; i < nb_pkts; ++i)
+		rx_pkts[i] = stage[i];
+
+	/* update internal queue state */
+	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+	return nb_pkts;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	     uint16_t nb_pkts)
+{
+	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
+	uint16_t nb_rx = 0;
+
+	/* Any previously recv'd pkts will be returned from the Rx stage */
+	if (rxq->rx_nb_avail)
+		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+	/* Scan the H/W ring for packets to receive */
+	nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
+
+	/* update internal queue state */
+	rxq->rx_next_avail = 0;
+	rxq->rx_nb_avail = nb_rx;
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+	/* if required, allocate new buffers to replenish descriptors */
+	if (rxq->rx_tail > rxq->rx_free_trigger) {
+		uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+		if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
+			int i, j;
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+				rxq->rx_free_thresh;
+
+			/*
+			 * Need to rewind any previous receives if we cannot
+			 * allocate new buffers to replenish the old ones.
+			 */
+			rxq->rx_nb_avail = 0;
+			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+			return 0;
+		}
+
+		/* update tail pointer */
+		rte_wmb();
+		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+	}
+
+	if (rxq->rx_tail >= rxq->nb_rx_desc)
+		rxq->rx_tail = 0;
+
+	/* received any packets this loop? */
+	if (rxq->rx_nb_avail)
+		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+	return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
+static uint16_t
+ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			   uint16_t nb_pkts)
+{
+	uint16_t nb_rx;
+
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
+		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+	/* request is relatively large, chunk it up */
+	nb_rx = 0;
+	while (nb_pkts) {
+		uint16_t ret, n;
+		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
+		ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+		nb_rx = (uint16_t)(nb_rx + ret);
+		nb_pkts = (uint16_t)(nb_pkts - ret);
+		if (ret < n)
+			break;
+	}
+
+	return nb_rx;
+}
+
+#else
+
+/* Stub to avoid extra ifdefs */
+static uint16_t
+ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
+	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static inline int
+ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
+		    __rte_unused bool reset_mbuf)
+{
+	return -ENOMEM;
+}
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
+
+uint16_t
+ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct ixgbe_rx_queue *rxq;
+	volatile union ixgbe_adv_rx_desc *rx_ring;
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_entry *sw_ring;
+	struct ixgbe_rx_entry *rxe;
+	struct rte_mbuf *rxm;
+	struct rte_mbuf *nmb;
+	union ixgbe_adv_rx_desc rxd;
+	uint64_t dma_addr;
+	uint32_t staterr;
+	uint32_t hlen_type_rss;
+	uint16_t pkt_len;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+	uint16_t nb_hold;
+	uint64_t pkt_flags;
+
+	nb_rx = 0;
+	nb_hold = 0;
+	rxq = rx_queue;
+	rx_id = rxq->rx_tail;
+	rx_ring = rxq->rx_ring;
+	sw_ring = rxq->sw_ring;
+	while (nb_rx < nb_pkts) {
+		/*
+		 * The order of operations here is important as the DD status
+		 * bit must not be read after any other descriptor fields.
+		 * rx_ring and rxdp are pointing to volatile data so the order
+		 * of accesses cannot be reordered by the compiler. If they were
+		 * not volatile, they could be reordered which could lead to
+		 * using invalid descriptor fields when read from rxd.
+		 */
+		rxdp = &rx_ring[rx_id];
+		staterr = rxdp->wb.upper.status_error;
+		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+			break;
+		rxd = *rxdp;
+
+		/*
+		 * End of packet.
+		 *
+		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
+		 * is likely to be invalid and to be dropped by the various
+		 * validation checks performed by the network stack.
+		 *
+		 * Allocate a new mbuf to replenish the RX ring descriptor.
+		 * If the allocation fails:
+		 *    - arrange for that RX descriptor to be the first one
+		 *      being parsed the next time the receive function is
+		 *      invoked [on the same queue].
+		 *
+		 *    - Stop parsing the RX ring and return immediately.
+		 *
+		 * This policy does not drop the packet received in the RX
+		 * descriptor for which the allocation of a new mbuf failed.
+		 * Thus, it allows that packet to be retrieved later if
+		 * mbufs have been freed in the meantime.
+		 * As a side effect, holding RX descriptors instead of
+		 * systematically giving them back to the NIC may lead to
+		 * RX ring exhaustion situations.
+		 * However, the NIC can gracefully prevent such situations
+		 * from happening by sending specific "back-pressure" flow
+		 * control frames to its peer(s).
+		 */
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "ext_err_stat=0x%08x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) staterr,
+			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		if (nmb == NULL) {
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		nb_hold++;
+		rxe = &sw_ring[rx_id];
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc)
+			rx_id = 0;
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 8 pointers
+		 * to mbufs.
+		 */
+		if ((rx_id & 0x3) == 0) {
+			rte_ixgbe_prefetch(&rx_ring[rx_id]);
+			rte_ixgbe_prefetch(&sw_ring[rx_id]);
+		}
+
+		rxm = rxe->mbuf;
+		rxe->mbuf = nmb;
+		dma_addr =
+			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+		rxdp->read.hdr_addr = dma_addr;
+		rxdp->read.pkt_addr = dma_addr;
+
+		/*
+		 * Initialize the returned mbuf.
+		 * 1) setup generic mbuf fields:
+		 *    - number of segments,
+		 *    - next segment,
+		 *    - packet length,
+		 *    - RX port identifier.
+		 * 2) integrate hardware offload data, if any:
+		 *    - RSS flag & hash,
+		 *    - IP checksum flag,
+		 *    - VLAN TCI, if any,
+		 *    - error flags.
+		 */
+		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+				      rxq->crc_len);
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->port = rxq->port_id;
+
+		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+		rxm->ol_flags = pkt_flags;
+
+		if (likely(pkt_flags & PKT_RX_RSS_HASH))
+			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+		else if (pkt_flags & PKT_RX_FDIR) {
+			rxm->hash.fdir.hash =
+				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+					   & IXGBE_ATR_HASH_MASK);
+			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+		}
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rx++] = rxm;
+	}
+	rxq->rx_tail = rx_id;
+
+	/*
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+	 * register.
+	 * Update the RDT with the value of the last processed RX descriptor
+	 * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+	 * hardware point of view...
+	 */
+	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+	if (nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
+		rx_id = (uint16_t) ((rx_id == 0) ?
+				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
+		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+		nb_hold = 0;
+	}
+	rxq->nb_rx_hold = nb_hold;
+	return (nb_rx);
+}
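+
+/*
+ * Illustrative example (not part of the driver logic): with rx_free_thresh =
+ * 32, once more than 32 descriptors have been consumed the RDT register is
+ * written with rx_id - 1 (or nb_rx_desc - 1 when rx_id is 0), so the tail
+ * never catches up with the head and the ring is never seen as full by the
+ * hardware.
+ */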
+
+/**
+ * Detect an RSC descriptor.
+ */
+static inline uint32_t
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+	return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
+		IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
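+
+/*
+ * Illustrative example (not part of the driver logic, assuming RSCCNT
+ * occupies bits 17-20 of the lower status dword): a raw value of 0x00060000
+ * yields an RSC count of 3, while 0 means the descriptor is not part of an
+ * RSC aggregation.
+ */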
+
+/**
+ * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ *    - RX port identifier
+ *    - hardware offload data, if any:
+ *      - RSS flag & hash
+ *      - IP checksum flag
+ *      - VLAN TCI, if any
+ *      - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @port_id Port ID of the Rx queue
+ */
+static inline void
+ixgbe_fill_cluster_head_buf(
+	struct rte_mbuf *head,
+	union ixgbe_adv_rx_desc *desc,
+	uint8_t port_id,
+	uint32_t staterr)
+{
+	uint32_t hlen_type_rss;
+	uint64_t pkt_flags;
+
+	head->port = port_id;
+
+	/*
+	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+	 * set in the pkt_flags field.
+	 */
+	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
+	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
+	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+	head->ol_flags = pkt_flags;
+
+	if (likely(pkt_flags & PKT_RX_RSS_HASH))
+		head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
+	else if (pkt_flags & PKT_RX_FDIR) {
+		head->hash.fdir.hash =
+			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
+							  & IXGBE_ATR_HASH_MASK;
+		head->hash.fdir.id =
+			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
+	}
+}
+
+/**
+ * ixgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ *    a) Update the HEAD of the current RSC aggregation cluster with the new
+ *       segment's data length.
+ *    b) Set the "next" pointer of the current segment to point to the segment
+ *       at the NEXTP index.
+ *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ *       in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ *    flags and deliver the cluster up to the upper layers. In our case - put it
+ *    in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		    bool bulk_alloc)
+{
+	struct ixgbe_rx_queue *rxq = rx_queue;
+	volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
+	struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
+	struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx = 0;
+	uint16_t nb_hold = rxq->nb_rx_hold;
+	uint16_t prev_id = rxq->rx_tail;
+
+	while (nb_rx < nb_pkts) {
+		bool eop;
+		struct ixgbe_rx_entry *rxe;
+		struct ixgbe_scattered_rx_entry *sc_entry;
+		struct ixgbe_scattered_rx_entry *next_sc_entry;
+		struct ixgbe_rx_entry *next_rxe;
+		struct rte_mbuf *first_seg;
+		struct rte_mbuf *rxm;
+		struct rte_mbuf *nmb;
+		union ixgbe_adv_rx_desc rxd;
+		uint16_t data_len;
+		uint16_t next_id;
+		volatile union ixgbe_adv_rx_desc *rxdp;
+		uint32_t staterr;
+
+next_desc:
+		/*
+		 * The code in this whole file uses the volatile pointer to
+		 * ensure the read ordering of the status and the rest of the
+		 * descriptor fields (on the compiler level only!!!). This is so
+		 * UGLY - why not to just use the compiler barrier instead? DPDK
+		 * even has the rte_compiler_barrier() for that.
+		 *
+		 * But most importantly this is just wrong because this doesn't
+		 * ensure memory ordering in a general case at all. For
+		 * instance, DPDK is supposed to work on Power CPUs where
+		 * compiler barrier may just not be enough!
+		 *
+		 * I tried to write only this function properly to have a
+		 * starting point (as a part of an LRO/RSC series) but the
+		 * compiler cursed at me when I tried to cast away the
+		 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+		 * keeping it the way it is for now.
+		 *
+		 * The code in this file is broken in so many other places and
+		 * will just not work on a big endian CPU anyway therefore the
+		 * lines below will have to be revisited together with the rest
+		 * of the ixgbe PMD.
+		 *
+		 * TODO:
+		 *    - Get rid of "volatile" crap and let the compiler do its
+		 *      job.
+		 *    - Use the proper memory barrier (rte_rmb()) to ensure the
+		 *      memory ordering below.
+		 */
+		rxdp = &rx_ring[rx_id];
+		staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+
+		if (!(staterr & IXGBE_RXDADV_STAT_DD))
+			break;
+
+		rxd = *rxdp;
+
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+				  "staterr=0x%x data_len=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, staterr,
+			   rte_le_to_cpu_16(rxd.wb.upper.length));
+
+		if (!bulk_alloc) {
+			nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+			if (nmb == NULL) {
+				PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+						  "port_id=%u queue_id=%u",
+					   rxq->port_id, rxq->queue_id);
+
+				rte_eth_devices[rxq->port_id].data->
+							rx_mbuf_alloc_failed++;
+				break;
+			}
+		} else if (nb_hold > rxq->rx_free_thresh) {
+			uint16_t next_rdt = rxq->rx_free_trigger;
+
+			if (!ixgbe_rx_alloc_bufs(rxq, false)) {
+				rte_wmb();
+				IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
+						    next_rdt);
+				nb_hold -= rxq->rx_free_thresh;
+			} else {
+				PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+						  "port_id=%u queue_id=%u",
+					   rxq->port_id, rxq->queue_id);
+
+				rte_eth_devices[rxq->port_id].data->
+							rx_mbuf_alloc_failed++;
+				break;
+			}
+		}
+
+		nb_hold++;
+		rxe = &sw_ring[rx_id];
+		eop = staterr & IXGBE_RXDADV_STAT_EOP;
+
+		next_id = rx_id + 1;
+		if (next_id == rxq->nb_rx_desc)
+			next_id = 0;
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 4 pointers
+		 * to mbufs.
+		 */
+		if ((next_id & 0x3) == 0) {
+			rte_ixgbe_prefetch(&rx_ring[next_id]);
+			rte_ixgbe_prefetch(&sw_ring[next_id]);
+		}
+
+		rxm = rxe->mbuf;
+
+		if (!bulk_alloc) {
+			__le64 dma =
+			  rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+			/*
+			 * Update RX descriptor with the physical address of the
+			 * new data buffer of the newly allocated mbuf.
+			 */
+			rxe->mbuf = nmb;
+
+			rxm->data_off = RTE_PKTMBUF_HEADROOM;
+			rxdp->read.hdr_addr = dma;
+			rxdp->read.pkt_addr = dma;
+		} else
+			rxe->mbuf = NULL;
+
+		/*
+		 * Set data length & data buffer address of mbuf.
+		 */
+		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+		rxm->data_len = data_len;
+
+		if (!eop) {
+			uint16_t nextp_id;
+			/*
+			 * Get next descriptor index:
+			 *  - For RSC it's in the NEXTP field.
+			 *  - For a scattered packet - it's just a following
+			 *    descriptor.
+			 */
+			if (ixgbe_rsc_count(&rxd))
+				nextp_id =
+					(staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+						       IXGBE_RXDADV_NEXTP_SHIFT;
+			else
+				nextp_id = next_id;
+
+			next_sc_entry = &sw_sc_ring[nextp_id];
+			next_rxe = &sw_ring[nextp_id];
+			rte_ixgbe_prefetch(next_rxe);
+		}
+
+		sc_entry = &sw_sc_ring[rx_id];
+		first_seg = sc_entry->fbuf;
+		sc_entry->fbuf = NULL;
+
+		/*
+		 * If this is the first buffer of the received packet,
+		 * set the pointer to the first mbuf of the packet and
+		 * initialize its context.
+		 * Otherwise, update the total length and the number of segments
+		 * of the current scattered packet, and update the pointer to
+		 * the last mbuf of the current packet.
+		 */
+		if (first_seg == NULL) {
+			first_seg = rxm;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
+		} else {
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+		}
+
+		prev_id = rx_id;
+		rx_id = next_id;
+
+		/*
+		 * If this is not the last buffer of the received packet, update
+		 * the pointer to the first mbuf at the NEXTP entry in the
+		 * sw_sc_ring and continue to parse the RX ring.
+		 */
+		if (!eop) {
+			rxm->next = next_rxe->mbuf;
+			next_sc_entry->fbuf = first_seg;
+			goto next_desc;
+		}
+
+		/*
+		 * This is the last buffer of the received packet - return
+		 * the current cluster to the user.
+		 */
+		rxm->next = NULL;
+
+		/* Initialize the first mbuf of the returned packet */
+		ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
+					    staterr);
+
+		/* Prefetch data of first segment, if configured to do so. */
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+			first_seg->data_off);
+
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rx++] = first_seg;
+	}
+
+	/*
+	 * Record index of the next RX descriptor to probe.
+	 */
+	rxq->rx_tail = rx_id;
+
+	/*
+	 * If the number of free RX descriptors is greater than the RX free
+	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+	 * register.
+	 * Update the RDT with the value of the last processed RX descriptor
+	 * minus 1, to guarantee that the RDT register is never equal to the
+	 * RDH register, which creates a "full" ring situation from the
+	 * hardware point of view...
+	 */
+	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+			   "nb_hold=%u nb_rx=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+		rte_wmb();
+		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+		nb_hold = 0;
+	}
+
+	rxq->nb_rx_hold = nb_hold;
+	return nb_rx;
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
+{
+	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
+/*********************************************************************
+ *
+ *  Queue management functions
+ *
+ **********************************************************************/
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA only need 16-byte alignment, but TDLEN/RDLEN must be a multiple of
+ * 128 bytes, so we align TDBA/RDBA on a 128-byte boundary. This also optimizes
+ * for cache-line effects; the H/W supports cache-line sizes up to 128 bytes.
+ */
+#define IXGBE_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
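+
+/*
+ * Illustrative example (not part of the driver logic): both RX and TX
+ * descriptors are 16 bytes, so the 128-byte rule means the ring size must be
+ * a multiple of 8 descriptors; e.g. 512 descriptors occupy 8192 bytes, which
+ * satisfies the constraint, whereas 500 descriptors (8000 bytes) would not.
+ */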
+
+/*
+ * Create memzone for HW rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+		      uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+			dev->driver->pci_drv.name, ring_name,
+			dev->data->port_id, queue_id);
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		return mz;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+	return rte_memzone_reserve_bounded(z_name, ring_size,
+		socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
+#else
+	return rte_memzone_reserve_aligned(z_name, ring_size,
+		socket_id, 0, IXGBE_ALIGN);
+#endif
+}
+
+static void
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+{
+	unsigned i;
+
+	if (txq->sw_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+static void
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+	if (txq != NULL &&
+	    txq->sw_ring != NULL)
+		rte_free(txq->sw_ring);
+}
+
+static void
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
+{
+	if (txq != NULL && txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->free_swring(txq);
+		rte_free(txq);
+	}
+}
+
+void
+ixgbe_dev_tx_queue_release(void *txq)
+{
+	ixgbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
+static void
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+	struct ixgbe_tx_entry *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i] = zeroed_desc;
+	}
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t) (txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	memset((void*)&txq->ctx_cache, 0,
+		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
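+
+/*
+ * Illustrative example (not part of the driver logic): after the reset, the
+ * next_id fields chain the sw_ring entries into a ring (0 -> 1 -> ... ->
+ * nb_tx_desc-1 -> 0), every descriptor reports DD (i.e. "done"), and one
+ * descriptor is kept unused as a sentinel, so nb_tx_free = nb_tx_desc - 1.
+ */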
+
+static const struct ixgbe_txq_ops def_txq_ops = {
+	.release_mbufs = ixgbe_tx_queue_release_mbufs,
+	.free_swring = ixgbe_tx_free_swring,
+	.reset = ixgbe_reset_tx_queue,
+};
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+{
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
+			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+		PMD_INIT_LOG(INFO, "Using simple tx code path");
+#ifdef RTE_IXGBE_INC_VECTOR
+		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
+					ixgbe_txq_vec_setup(txq) == 0)) {
+			PMD_INIT_LOG(INFO, "Vector tx enabled.");
+			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+		} else
+#endif
+		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+	} else {
+		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+		PMD_INIT_LOG(INFO,
+				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
+				(unsigned long)txq->txq_flags,
+				(unsigned long)IXGBE_SIMPLE_FLAGS);
+		PMD_INIT_LOG(INFO,
+				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+				(unsigned long)txq->tx_rs_thresh,
+				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+		dev->tx_pkt_burst = ixgbe_xmit_pkts;
+	}
+}
+
+int
+ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_txconf *tx_conf)
+{
+	const struct rte_memzone *tz;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_hw     *hw;
+	uint16_t tx_rs_thresh, tx_free_thresh;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Validate number of transmit descriptors.
+	 * It must not exceed hardware maximum, and must be multiple
+	 * of IXGBE_ALIGN.
+	 */
+	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
+	    (nb_desc > IXGBE_MAX_RING_DESC) ||
+	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+		return -EINVAL;
+	}
+
+	/*
+	 * The following two parameters control the setting of the RS bit on
+	 * transmit descriptors.
+	 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+	 * descriptors have been used.
+	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+	 * descriptors are used or if the number of descriptors required
+	 * to transmit a packet is greater than the number of free TX
+	 * descriptors.
+	 * The following constraints must be satisfied:
+	 *  tx_rs_thresh must be greater than 0.
+	 *  tx_rs_thresh must be less than the size of the ring minus 2.
+	 *  tx_rs_thresh must be less than or equal to tx_free_thresh.
+	 *  tx_rs_thresh must be a divisor of the ring size.
+	 *  tx_free_thresh must be greater than 0.
+	 *  tx_free_thresh must be less than the size of the ring minus 3.
+	 * One descriptor in the TX ring is used as a sentinel to avoid a
+	 * H/W race condition, hence the maximum threshold constraints.
+	 * When set to zero use default values.
+	 */
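+	/*
+	 * Worked example (illustrative values only): a ring of nb_desc = 512
+	 * with tx_rs_thresh = 32 and tx_free_thresh = 32 satisfies every
+	 * constraint above: 32 > 0, 32 < 510, 32 <= 32, 512 % 32 == 0 and
+	 * 32 < 509.
+	 */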
+	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+	if (tx_rs_thresh >= (nb_desc - 2)) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
+			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+	if (tx_free_thresh >= (nb_desc - 3)) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+	if (tx_rs_thresh > tx_free_thresh) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
+		return -(EINVAL);
+	}
+	if ((nb_desc % tx_rs_thresh) != 0) {
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	/*
+	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
+	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+	 * by the NIC and all descriptors are written back after the NIC
+	 * accumulates WTHRESH descriptors.
+	 */
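+	/*
+	 * E.g. any tx_rs_thresh greater than 1, including the defaults
+	 * applied above, requires tx_conf->tx_thresh.wthresh to be 0.
+	 */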
+	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
+		return -(EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the tx queue data structure */
+	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL)
+		return (-ENOMEM);
+
+	/*
+	 * Allocate TX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+			sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
+			socket_id);
+	if (tz == NULL) {
+		ixgbe_tx_queue_release(txq);
+		return (-ENOMEM);
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_rs_thresh = tx_rs_thresh;
+	txq->tx_free_thresh = tx_free_thresh;
+	txq->pthresh = tx_conf->tx_thresh.pthresh;
+	txq->hthresh = tx_conf->tx_thresh.hthresh;
+	txq->wthresh = tx_conf->tx_thresh.wthresh;
+	txq->queue_id = queue_idx;
+	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	txq->port_id = dev->data->port_id;
+	txq->txq_flags = tx_conf->txq_flags;
+	txq->ops = &def_txq_ops;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	/*
+	 * Set the Tx tail register address: use VFTDT when running as a
+	 * virtual function, TDT otherwise.
+	 */
+	if (hw->mac.type == ixgbe_mac_82599_vf ||
+	    hw->mac.type == ixgbe_mac_X540_vf ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf)
+		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+	else
+		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+#ifndef	RTE_LIBRTE_XEN_DOM0
+	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
+	txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+
+	/* Allocate software ring */
+	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+				sizeof(struct ixgbe_tx_entry) * nb_desc,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		ixgbe_tx_queue_release(txq);
+		return (-ENOMEM);
+	}
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+	/* set up vector or scalar TX function as appropriate */
+	ixgbe_set_tx_function(dev, txq);
+
+	txq->ops->reset(txq);
+
+	dev->data->tx_queues[queue_idx] = txq;
+
+
+	return (0);
+}
+
+/**
+ * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
+ * just free the first "nb_segs" segments of the cluster explicitly by calling
+ * rte_pktmbuf_free_seg().
+ *
+ * @m scattered cluster head
+ */
+static void
+ixgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+	uint8_t i, nb_segs = m->nb_segs;
+	struct rte_mbuf *next_seg;
+
+	for (i = 0; i < nb_segs; i++) {
+		next_seg = m->next;
+		rte_pktmbuf_free_seg(m);
+		m = next_seg;
+	}
+}
+
+static void
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
+{
+	unsigned i;
+
+	if (rxq->sw_ring != NULL) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+				rxq->sw_ring[i].mbuf = NULL;
+			}
+		}
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+		if (rxq->rx_nb_avail) {
+			for (i = 0; i < rxq->rx_nb_avail; ++i) {
+				struct rte_mbuf *mb;
+				mb = rxq->rx_stage[rxq->rx_next_avail + i];
+				rte_pktmbuf_free_seg(mb);
+			}
+			rxq->rx_nb_avail = 0;
+		}
+#endif
+	}
+
+	if (rxq->sw_sc_ring)
+		for (i = 0; i < rxq->nb_rx_desc; i++)
+			if (rxq->sw_sc_ring[i].fbuf) {
+				ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+				rxq->sw_sc_ring[i].fbuf = NULL;
+			}
+}
+
+static void
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
+{
+	if (rxq != NULL) {
+		ixgbe_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_ring);
+		rte_free(rxq->sw_sc_ring);
+		rte_free(rxq);
+	}
+}
+
+void
+ixgbe_dev_rx_queue_release(void *rxq)
+{
+	ixgbe_rx_queue_release(rxq);
+}
+
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ *        0: the preconditions are satisfied and the bulk allocation function
+ *           can be used.
+ *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ *           function must be used.
+ */
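+/*
+ * Worked example (illustrative values, assuming RTE_PMD_IXGBE_RX_MAX_BURST
+ * is 32 and IXGBE_MAX_RING_DESC is 4096): a queue with nb_rx_desc = 128 and
+ * rx_free_thresh = 32 passes every check below: 32 >= 32, 32 < 128,
+ * 128 % 32 == 0 and 128 < 4096 - 32.
+ */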
+static inline int
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
+#else
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
+#endif
+{
+	int ret = 0;
+
+	/*
+	 * Make sure the following pre-conditions are satisfied:
+	 *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
+	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
+	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+	 *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
+	 * Scattered packets are not supported.  This should be checked
+	 * outside of this function.
+	 */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
+		ret = -EINVAL;
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
+		ret = -EINVAL;
+	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
+		ret = -EINVAL;
+	} else if (!(rxq->nb_rx_desc <
+	       (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "IXGBE_MAX_RING_DESC=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
+			     RTE_PMD_IXGBE_RX_MAX_BURST);
+		ret = -EINVAL;
+	}
+#else
+	ret = -EINVAL;
+#endif
+
+	return ret;
+}
+
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
+static void
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
+{
+	static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
+	unsigned i;
+	uint16_t len = rxq->nb_rx_desc;
+
+	/*
+	 * By default, the Rx queue setup function allocates enough memory for
+	 * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
+	 * extra memory at the end of the descriptor ring to be zero'd out. A
+	 * pre-condition for using the Rx burst bulk alloc function is that the
+	 * number of descriptors is less than or equal to
+	 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
+	 * constraints here to see if we need to zero out memory after the end
+	 * of the H/W descriptor ring.
+	 */
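+	/*
+	 * E.g. with nb_rx_desc = 128 and bulk alloc allowed, len becomes
+	 * 128 + RTE_PMD_IXGBE_RX_MAX_BURST descriptors, so look-ahead reads
+	 * beyond the ring always hit zeroed memory.
+	 */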
+	if (adapter->rx_bulk_alloc_allowed)
+		/* zero out extra memory */
+		len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
+	/*
+	 * Zero out HW ring memory. Zero out extra memory at the end of
+	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+	 * reads extra memory as zeros.
+	 */
+	for (i = 0; i < len; i++) {
+		rxq->rx_ring[i] = zeroed_desc;
+	}
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+	/*
+	 * initialize extra software ring entries. Space for these extra
+	 * entries is always allocated
+	 */
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+	for (i = rxq->nb_rx_desc; i < len; ++i) {
+		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+	}
+
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
+int
+ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_idx,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool *mp)
+{
+	const struct rte_memzone *rz;
+	struct ixgbe_rx_queue *rxq;
+	struct ixgbe_hw     *hw;
+	uint16_t len;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Validate number of receive descriptors.
+	 * It must not exceed hardware maximum, and must be multiple
+	 * of IXGBE_ALIGN.
+	 */
+	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
+	    (nb_desc > IXGBE_MAX_RING_DESC) ||
+	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL)
+		return (-ENOMEM);
+	rxq->mb_pool = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+	rxq->queue_id = queue_idx;
+	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+	rxq->port_id = dev->data->port_id;
+	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+							0 : ETHER_CRC_LEN);
+	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+	/*
+	 * Allocate RX ring hardware descriptors. A memzone large enough to
+	 * handle the maximum ring size is allocated in order to allow for
+	 * resizing in later calls to the queue setup function.
+	 */
+	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+				   RX_RING_SZ, socket_id);
+	if (rz == NULL) {
+		ixgbe_rx_queue_release(rxq);
+		return (-ENOMEM);
+	}
+
+	/*
+	 * Zero init all the descriptors in the ring.
+	 */
+	memset(rz->addr, 0, RX_RING_SZ);
+
+	/*
+	 * Set the Rx tail/head register addresses: use VFRDT/VFRDH when
+	 * running as a virtual function, RDT/RDH otherwise.
+	 */
+	if (hw->mac.type == ixgbe_mac_82599_vf ||
+	    hw->mac.type == ixgbe_mac_X540_vf ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf) {
+		rxq->rdt_reg_addr =
+			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
+		rxq->rdh_reg_addr =
+			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
+	} else {
+		rxq->rdt_reg_addr =
+			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
+		rxq->rdh_reg_addr =
+			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
+	}
+#ifndef RTE_LIBRTE_XEN_DOM0
+	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
+	rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
+
+	/*
+	 * Certain constraints must be met in order to use the bulk buffer
+	 * allocation Rx burst function. If any Rx queue doesn't meet them,
+	 * the feature should be disabled for the whole port.
+	 */
+	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		adapter->rx_bulk_alloc_allowed = false;
+	}
+
+	/*
+	 * Allocate software ring. Allow for space at the end of the
+	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+	 * function does not access an invalid memory region.
+	 */
+	len = nb_desc;
+	if (adapter->rx_bulk_alloc_allowed)
+		len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
+	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+					  sizeof(struct ixgbe_rx_entry) * len,
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_ring) {
+		ixgbe_rx_queue_release(rxq);
+		return (-ENOMEM);
+	}
+
+	/*
+	 * Always allocate even if it's not going to be needed in order to
+	 * simplify the code.
+	 *
+	 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+	 * be requested in ixgbe_dev_rx_init(), which is called later from
+	 * dev_start() flow.
+	 */
+	rxq->sw_sc_ring =
+		rte_zmalloc_socket("rxq->sw_sc_ring",
+				   sizeof(struct ixgbe_scattered_rx_entry) * len,
+				   RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_sc_ring) {
+		ixgbe_rx_queue_release(rxq);
+		return (-ENOMEM);
+	}
+
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+			    "dma_addr=0x%"PRIx64,
+		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+		     rxq->rx_ring_phys_addr);
+
+	if (!rte_is_power_of_2(nb_desc)) {
+		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+				    "preconditions - canceling the feature for "
+				    "the whole port[%d]",
+			     rxq->queue_id, rxq->port_id);
+		adapter->rx_vec_allowed = false;
+	} else
+		ixgbe_rxq_vec_setup(rxq);
+
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	ixgbe_reset_rx_queue(adapter, rxq);
+
+	return 0;
+}
+
+uint32_t
+ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define IXGBE_RXQ_SCAN_INTERVAL 4
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_queue *rxq;
+	uint32_t desc = 0;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
+		return 0;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
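+	/*
+	 * Scan the DD bit of every IXGBE_RXQ_SCAN_INTERVAL-th descriptor, so
+	 * the returned count is an approximation rounded to that interval.
+	 */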
+	while ((desc < rxq->nb_rx_desc) &&
+		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+		desc += IXGBE_RXQ_SCAN_INTERVAL;
+		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
+		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+			rxdp = &(rxq->rx_ring[rxq->rx_tail +
+				desc - rxq->nb_rx_desc]);
+	}
+
+	return desc;
+}
+
+int
+ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_queue *rxq = rx_queue;
+	uint32_t desc;
+
+	if (unlikely(offset >= rxq->nb_rx_desc))
+		return 0;
+	desc = rxq->rx_tail + offset;
+	if (desc >= rxq->nb_rx_desc)
+		desc -= rxq->nb_rx_desc;
+
+	rxdp = &rxq->rx_ring[desc];
+	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+}
+
+void
+ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+	unsigned i;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq != NULL) {
+			txq->ops->release_mbufs(txq);
+			txq->ops->reset(txq);
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq != NULL) {
+			ixgbe_rx_queue_release_mbufs(rxq);
+			ixgbe_reset_rx_queue(adapter, rxq);
+		}
+	}
+}
+
+/*********************************************************************
+ *
+ *  Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ * See section 7.1.2.8 in the following document:
+ *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ *     - 32-bit result of the Microsoft RSS hash function,
+ *     - 4-bit RSS type field.
+ */
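+/*
+ * Illustrative example: for a packet whose 32-bit RSS hash is 0x1234567A,
+ * the seven LSBs (0x7A = 122) select RETA entry 122, and the queue number
+ * stored in that entry becomes the packet's Rx queue.
+ */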
+
+/*
+ * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+ixgbe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+	uint32_t mrqc;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc &= ~IXGBE_MRQC_RSSEN;
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
+static void
+ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
+{
+	uint8_t  *hash_key;
+	uint32_t mrqc;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		/* Fill in RSS hash key */
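+		/*
+		 * Each RSSRK register packs 4 key bytes little-endian; e.g.
+		 * the first four bytes of rss_intel_key (6D 5A 56 DA) would
+		 * be written to RSSRK(0) as 0xDA565A6D.
+		 */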
+		for (i = 0; i < 10; i++) {
+			rss_key  = hash_key[(i * 4)];
+			rss_key |= hash_key[(i * 4) + 1] << 8;
+			rss_key |= hash_key[(i * 4) + 2] << 16;
+			rss_key |= hash_key[(i * 4) + 3] << 24;
+			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+		}
+	}
+
+	/* Set configured hashing protocols in MRQC register */
+	rss_hf = rss_conf->rss_hf;
+	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
+	if (rss_hf & ETH_RSS_IPV4)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
+	if (rss_hf & ETH_RSS_IPV6)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
+	if (rss_hf & ETH_RSS_IPV6_EX)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
+int
+ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t mrqc;
+	uint64_t rss_hf;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
+	 *     "RSS enabling cannot be done dynamically while it must be
+	 *      preceded by a software reset"
+	 * Before changing anything, first check that the update RSS operation
+	 * does not attempt to disable RSS, if RSS was enabled at
+	 * initialization time, or does not attempt to enable RSS, if RSS was
+	 * disabled at initialization time.
+	 */
+	rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
+		if (rss_hf != 0) /* Enable RSS */
+			return -(EINVAL);
+		return 0; /* Nothing to do */
+	}
+	/* RSS enabled */
+	if (rss_hf == 0) /* Disable RSS */
+		return -(EINVAL);
+	ixgbe_hw_rss_hash_set(hw, rss_conf);
+	return 0;
+}
+
+int
+ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			    struct rte_eth_rss_conf *rss_conf)
+{
+	struct ixgbe_hw *hw;
+	uint8_t *hash_key;
+	uint32_t mrqc;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		/* Return RSS hash key */
+		for (i = 0; i < 10; i++) {
+			rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+			hash_key[(i * 4)] = rss_key & 0x000000FF;
+			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+		}
+	}
+
+	/* Get RSS functions configured in MRQC register */
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
+		rss_conf->rss_hf = 0;
+		return 0;
+	}
+	rss_hf = 0;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
+		rss_hf |= ETH_RSS_IPV4;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
+		rss_hf |= ETH_RSS_IPV6;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
+		rss_hf |= ETH_RSS_IPV6_EX;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
+		rss_hf |= ETH_RSS_IPV6_TCP_EX;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
+		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
+		rss_hf |= ETH_RSS_IPV6_UDP_EX;
+	rss_conf->rss_hf = rss_hf;
+	return 0;
+}
+
+static void
+ixgbe_rss_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rss_conf rss_conf;
+	struct ixgbe_hw *hw;
+	uint32_t reta;
+	uint16_t i;
+	uint16_t j;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Fill in redirection table
+	 * The byte-swap is needed because NIC registers are in
+	 * little-endian order.
+	 */
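+	/*
+	 * E.g. with 4 Rx queues the 128 entries repeat the pattern 0,1,2,3;
+	 * every four entries are packed into one RETA register (byte-swapped
+	 * to match the little-endian register layout) before being written.
+	 */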
+	reta = 0;
+	for (i = 0, j = 0; i < 128; i++, j++) {
+		if (j == dev->data->nb_rx_queues)
+			j = 0;
+		reta = (reta << 8) | j;
+		if ((i & 3) == 3)
+			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+					rte_bswap32(reta));
+	}
+
+	/*
+	 * Configure the RSS key and the RSS protocols used to compute
+	 * the RSS hash of input packets.
+	 */
+	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+	if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+		ixgbe_rss_disable(dev);
+		return;
+	}
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = rss_intel_key; /* Default hash key */
+	ixgbe_hw_rss_hash_set(hw, &rss_conf);
+}
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
+static void
+ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_dcb_conf *cfg;
+	struct ixgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+	uint16_t pbsize;
+	uint8_t nb_tcs; /* number of traffic classes */
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	num_pools = cfg->nb_queue_pools;
+	/* Check we have a valid number of pools */
+	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+		ixgbe_rss_disable(dev);
+		return;
+	}
+	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+	/*
+	 * RXPBSIZE
+	 * split rx buffer up into sections, each for 1 traffic class
+	 */
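+	/*
+	 * E.g. with 8 TCs each class receives pbsize = NIC_RX_BUFFER_SIZE / 8
+	 * = 0x40, programmed into RXPBSIZE via IXGBE_RXPBSIZE_SHIFT.
+	 */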
+	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+	for (i = 0 ; i < nb_tcs; i++) {
+		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
+		/* clear 10 bits. */
+		rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+	}
+	/* zero alloc all unused TCs */
+	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+		rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+		/* clear 10 bits. */
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+	}
+
+	/* MRQC: enable vmdq and dcb */
+	mrqc = ((num_pools == ETH_16_POOLS) ?
+		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN);
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+	/* PFVTCTL: turn on virtualisation and set the default pool */
+	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+	if (cfg->enable_default_pool) {
+		vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+	} else {
+		vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
+	queue_mapping = 0;
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		/*
+		 * mapping is done with 3 bits per priority,
+		 * so shift by i*3 each time
+		 */
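+		/* e.g. cfg->dcb_queue[1] = 2 puts the value 2 in bits 5:3 */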
+		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
+
+	/* RTRPCS: DCB related */
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
+
+	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	/* VFRE: pool enabling for receive - 16 or 32 */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
+			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/*
+	 * MPSAR - allow pools to read specific mac addresses
+	 * In this case, all pools should be able to read from mac addr 0
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
+
+	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+				(cfg->pool_map[i].vlan_id & 0xFFF)));
+		/*
+		 * Put the allowed pools in VFB reg. As we only have 16 or 32
+		 * pools, we only need to use the first half of the register
+		 * i.e. bits 0-31
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
+	}
+}
+
+/**
+ * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+               struct ixgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	uint32_t q;
+
+	PMD_INIT_FUNC_TRACE();
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		/* Disable the Tx desc arbiter so that MTQC can be changed */
+		reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		reg |= IXGBE_RTTDCS_ARBDIS;
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+		/* Enable DCB for Tx with 8 TCs */
+		if (dcb_config->num_tcs.pg_tcs == 8)
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+		else
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		if (dcb_config->vt_mode)
+			reg |= IXGBE_MTQC_VT_ENA;
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+		/* Disable drop for all queues */
+		for (q = 0; q < 128; q++)
+			IXGBE_WRITE_REG(hw, IXGBE_QDE,
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+		/* Enable the Tx desc arbiter */
+		reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		reg &= ~IXGBE_RTTDCS_ARBDIS;
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+		/* Enable Security TX Buffer IFG for DCB */
+		reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+		reg |= IXGBE_SECTX_DCB;
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+	}
+	return;
+}
+
+/**
+ * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+			struct ixgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct ixgbe_hw *hw =
+			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+	if (hw->mac.type != ixgbe_mac_82598EB)
+		/*PF VF Transmit Enable*/
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
+			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/*Configure general DCB TX parameters*/
+	ixgbe_dcb_tx_hw_config(hw,dcb_config);
+	return;
+}
+
+static void
+ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+                        struct ixgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+	struct ixgbe_dcb_tc_config *tc;
+	uint8_t i,j;
+
+	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+	}
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_rx_conf->dcb_queue[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
+	}
+}
+
+static void
+ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+                        struct ixgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+	struct ixgbe_dcb_tc_config *tc;
+	uint8_t i,j;
+
+	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
+		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+	} else {
+		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+	}
+
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = vmdq_tx_conf->dcb_queue[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
+	}
+	return;
+}
+
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
+		struct ixgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	struct ixgbe_dcb_tc_config *tc;
+	uint8_t i,j;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = rx_conf->dcb_queue[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
+	}
+}
+
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
+		struct ixgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	struct ixgbe_dcb_tc_config *tc;
+	uint8_t i,j;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = tx_conf->dcb_queue[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
+						(uint8_t)(1 << j);
+	}
+}
+
+/**
+ * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
+               struct ixgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	uint32_t vlanctrl;
+	uint8_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+		if (dcb_config->num_tcs.pg_tcs == 4) {
+			if (dcb_config->vt_mode)
+				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+					IXGBE_MRQC_VMDQRT4TCEN;
+			else {
+				IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+					IXGBE_MRQC_RT4TCEN;
+			}
+		}
+		if (dcb_config->num_tcs.pg_tcs == 8) {
+			if (dcb_config->vt_mode)
+				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+					IXGBE_MRQC_VMDQRT8TCEN;
+			else {
+				IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+					IXGBE_MRQC_RT8TCEN;
+			}
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+	}
+
+	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	return;
+}
+
+static void
+ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
+			uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+						  tsa, map);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
+			    uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+		break;
+	default:
+		break;
+	}
+}
+
+#define DCB_RX_CONFIG  1
+#define DCB_TX_CONFIG  1
+#define DCB_TX_PB      1024
+/**
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static int
+ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+			struct ixgbe_dcb_config *dcb_config)
+{
+	int     ret = 0;
+	uint8_t i,pfc_en,nb_tcs;
+	uint16_t pbsize;
+	uint8_t config_dcb_rx = 0;
+	uint8_t config_dcb_tx = 0;
+	uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+	uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+	uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+	uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+	uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+	struct ixgbe_dcb_tc_config *tc;
+	uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	struct ixgbe_hw *hw =
+			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case ETH_MQ_RX_VMDQ_DCB:
+		dcb_config->vt_mode = true;
+		if (hw->mac.type != ixgbe_mac_82598EB) {
+			config_dcb_rx = DCB_RX_CONFIG;
+			/*
+			 *get dcb and VT rx configuration parameters
+			 *from rte_eth_conf
+			 */
+			ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+			/*Configure general VMDQ and DCB RX parameters*/
+			ixgbe_vmdq_dcb_configure(dev);
+		}
+		break;
+	case ETH_MQ_RX_DCB:
+		dcb_config->vt_mode = false;
+		config_dcb_rx = DCB_RX_CONFIG;
+		/* Get dcb RX configuration parameters from rte_eth_conf */
+		ixgbe_dcb_rx_config(dev,dcb_config);
+		/*Configure general DCB RX parameters*/
+		ixgbe_dcb_rx_hw_config(hw, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+		break;
+	}
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case ETH_MQ_TX_VMDQ_DCB:
+		dcb_config->vt_mode = true;
+		config_dcb_tx = DCB_TX_CONFIG;
+		/* get DCB and VT TX configuration parameters from rte_eth_conf */
+		ixgbe_dcb_vt_tx_config(dev,dcb_config);
+		/*Configure general VMDQ and DCB TX parameters*/
+		ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+		break;
+
+	case ETH_MQ_TX_DCB:
+		dcb_config->vt_mode = false;
+		config_dcb_tx = DCB_TX_CONFIG;
+		/*get DCB TX configuration parameters from rte_eth_conf*/
+		ixgbe_dcb_tx_config(dev,dcb_config);
+		/*Configure general DCB TX parameters*/
+		ixgbe_dcb_tx_hw_config(hw, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+	nb_tcs = dcb_config->num_tcs.pfc_tcs;
+	/* Unpack map */
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+	if(nb_tcs == ETH_4_TCS) {
+		/* Avoid un-configured priority mapping to TC0 */
+		uint8_t j = 4;
+		uint8_t mask = 0xFF;
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+			mask = (uint8_t)(mask & (~ (1 << map[i])));
+		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
+			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+				map[j++] = i;
+			mask >>= 1;
+		}
+		/* Re-configure 4 TCs BW */
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+						(uint8_t)(100 / nb_tcs);
+			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+						(uint8_t)(100 / nb_tcs);
+		}
+		for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+		}
+	}
+
+	if(config_dcb_rx) {
+		/* Set RX buffer size */
+		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+		uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+		for (i = 0 ; i < nb_tcs; i++) {
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+		}
+		/* zero alloc all unused TCs */
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+		}
+	}
+	if(config_dcb_tx) {
+		/* Only support an equally distributed Tx packet buffer strategy. */
+		uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
+		uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+		for (i = 0; i < nb_tcs; i++) {
+			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+		}
+		/* Clear unused TCs, if any, to zero buffer size*/
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+		}
+	}
+
+	/*Calculates traffic class credits*/
+	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+				IXGBE_DCB_TX_CONFIG);
+	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+				IXGBE_DCB_RX_CONFIG);
+
+	if(config_dcb_rx) {
+		/* Unpack CEE standard containers */
+		ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
+		ixgbe_dcb_unpack_max_cee(dcb_config, max);
+		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
+		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
+		/* Configure PG(ETS) RX */
+		ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+	}
+
+	if(config_dcb_tx) {
+		/* Unpack CEE standard containers */
+		ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+		ixgbe_dcb_unpack_max_cee(dcb_config, max);
+		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+		/* Configure PG(ETS) TX */
+		ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+	}
+
+	/*Configure queue statistics registers*/
+	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+
+	/* Check if the PFC is supported */
+	if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+		pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+		for (i = 0; i < nb_tcs; i++) {
+			/*
+			 * E.g. with 8 TCs (pbsize = 64) the default
+			 * high_water is 48 and the default low_water is 16.
+			 */
+			hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+			hw->fc.low_water[i] = pbsize / 4;
+			/* Enable pfc for this TC */
+			tc = &dcb_config->tc_config[i];
+			tc->pfc = ixgbe_dcb_pfc_enabled;
+		}
+		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+			pfc_en &= 0x0F;
+		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
+/**
+ * ixgbe_configure_dcb - Configure DCB hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void ixgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+	struct ixgbe_dcb_config *dcb_cfg =
+			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* check that the configured multi-queue mode supports DCB */
+	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+		return;
+
+	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+		return;
+
+	/** Configure DCB hardware **/
+	ixgbe_dcb_hw_configure(dev,dcb_cfg);
+
+	return;
+}
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct ixgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, vlanctrl;
+	uint32_t vmolr = 0;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	num_pools = cfg->nb_queue_pools;
+
+	ixgbe_rss_disable(dev);
+
+	/* MRQC: enable vmdq */
+	mrqc = IXGBE_MRQC_VMDQEN;
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+	/* PFVTCTL: turn on virtualisation and set the default pool */
+	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+	if (cfg->enable_default_pool)
+		vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
+	else
+		vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+	for (i = 0; i < (int)num_pools; i++) {
+		vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+	}
+
+	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
+
+	/* VFRE: pool enabling for receive - 64 */
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
+	if (num_pools == ETH_64_POOLS)
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
+
+	/*
+	 * MPSAR - allow pools to read specific mac addresses
+	 * In this case, all pools should be able to read from mac addr 0
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
+	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
+
+	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+				(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
+		/*
+		 * Put the allowed pools into the VLVFB registers. With up to
+		 * 64 pools the mask is 64 bits wide: bits 0-31 go into
+		 * VLVFB(2 * i) and bits 32-63 into VLVFB(2 * i + 1).
+		 */
+		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+					(cfg->pool_map[i].pools & UINT32_MAX));
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
+					((cfg->pool_map[i].pools >> 32) \
+					& UINT32_MAX));
+
+	}
+
+	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
+	if (cfg->enable_loop_back) {
+		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+		for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
+{
+	uint32_t reg;
+	uint32_t q;
+
+	PMD_INIT_FUNC_TRACE();
+	/*PF VF Transmit Enable*/
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
+
+	/* Disable the Tx desc arbiter so that MTQC can be changed */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg |= IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+	          (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+	/* Enable the Tx desc arbiter */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg &= ~IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static int
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
+{
+	struct ixgbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile union ixgbe_adv_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned) rxq->queue_id);
+			return (-ENOMEM);
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+		rxd = &rxq->rx_ring[i];
+		rxd->read.hdr_addr = dma_addr;
+		rxd->read.pkt_addr = dma_addr;
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+	uint32_t mrqc;
+
+	ixgbe_rss_configure(dev);
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* MRQC: enable VF RSS */
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case ETH_64_POOLS:
+		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+		break;
+
+	case ETH_32_POOLS:
+		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+		break;
+
+	default:
+		PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+		return -EINVAL;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+	return 0;
+}
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case ETH_64_POOLS:
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+			IXGBE_MRQC_VMDQEN);
+		break;
+
+	case ETH_32_POOLS:
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+			IXGBE_MRQC_VMDQRT4TCEN);
+		break;
+
+	case ETH_16_POOLS:
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+			IXGBE_MRQC_VMDQRT8TCEN);
+		break;
+	default:
+		PMD_INIT_LOG(ERR,
+			"invalid pool number in IOV mode");
+		break;
+	}
+	return 0;
+}
+
+static int
+ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return 0;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB/RSS w/o VMDq multi-queue setting
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+			case ETH_MQ_RX_RSS:
+				ixgbe_rss_configure(dev);
+				break;
+
+			case ETH_MQ_RX_VMDQ_DCB:
+				ixgbe_vmdq_dcb_configure(dev);
+				break;
+
+			case ETH_MQ_RX_VMDQ_ONLY:
+				ixgbe_vmdq_rx_hw_configure(dev);
+				break;
+
+			case ETH_MQ_RX_NONE:
+				/* if mq_mode is none, disable rss mode.*/
+			default: ixgbe_rss_disable(dev);
+		}
+	} else {
+		/*
+		 * SRIOV active scheme
+		 * Support RSS together with VMDq & SRIOV
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			ixgbe_config_vf_rss(dev);
+			break;
+
+		/* FIXME if support DCB/RSS together with VMDq & SRIOV */
+		case ETH_MQ_RX_VMDQ_DCB:
+		case ETH_MQ_RX_VMDQ_DCB_RSS:
+			PMD_INIT_LOG(ERR,
+				"Could not support DCB with VMDq & SRIOV");
+			return -1;
+		default:
+			ixgbe_config_vf_default(dev);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t mtqc;
+	uint32_t rttdcs;
+
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return 0;
+
+	/* disable arbiter before setting MTQC */
+	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	rttdcs |= IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB w/o VMDq multi-queue setting
+		 */
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+			ixgbe_vmdq_tx_hw_configure(hw);
+		else {
+			mtqc = IXGBE_MTQC_64Q_1PB;
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+		}
+	} else {
+		switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+		/*
+		 * SRIOV active scheme
+		 * FIXME if support DCB together with VMDq & SRIOV
+		 */
+		case ETH_64_POOLS:
+			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
+			break;
+		case ETH_32_POOLS:
+			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
+			break;
+		case ETH_16_POOLS:
+			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
+				IXGBE_MTQC_8TC_8TQ;
+			break;
+		default:
+			mtqc = IXGBE_MTQC_64Q_1PB;
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+	}
+
+	/* re-enable arbiter */
+	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+	return 0;
+}
+
+/**
+ * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
+ *
+ * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
+ * spec rev. 3.0 chapter 8.2.3.8.13.
+ *
+ * @pool Memory pool of the Rx queue
+ */
+static inline uint32_t
+ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+	struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+	/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
+	uint16_t maxdesc =
+		IPV4_MAX_PKT_LEN /
+			(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
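+	/*
+	 * E.g. assuming a typical 2048 B data room with 128 B headroom,
+	 * maxdesc = 65535 / 1920 = 34, so IXGBE_RSCCTL_MAXDESC_16 is chosen.
+	 */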
+	if (maxdesc >= 16)
+		return IXGBE_RSCCTL_MAXDESC_16;
+	else if (maxdesc >= 8)
+		return IXGBE_RSCCTL_MAXDESC_8;
+	else if (maxdesc >= 4)
+		return IXGBE_RSCCTL_MAXDESC_4;
+	else
+		return IXGBE_RSCCTL_MAXDESC_1;
+}
+
+/**
+ * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
+ * interrupt
+ *
+ * (Taken from FreeBSD tree)
+ * (yes this is all very magic and confusing :)
+ *
+ * @dev port handle
+ * @entry the register array entry
+ * @vector the MSIX vector for this queue
+ * @type RX/TX/MISC
+ */
+static void
+ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	u32 ivar, index;
+
+	vector |= IXGBE_IVAR_ALLOC_VAL;
+
+	switch (hw->mac.type) {
+
+	case ixgbe_mac_82598EB:
+		if (type == -1)
+			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+		else
+			entry += (type * 64);
+		index = (entry >> 2) & 0x1F;
+		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+		ivar &= ~(0xFF << (8 * (entry & 0x3)));
+		ivar |= (vector << (8 * (entry & 0x3)));
+		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+		break;
+
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		if (type == -1) { /* MISC IVAR */
+			index = (entry & 1) * 8;
+			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+			ivar &= ~(0xFF << index);
+			ivar |= (vector << index);
+			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+		} else {	/* RX/TX IVARS */
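+			/*
+			 * E.g. Rx queue 3 (entry = 3, type = 0): index =
+			 * 16 * (3 & 1) + 8 * 0 = 16, so the vector lands in
+			 * bits 23:16 of IVAR(3 >> 1) = IVAR(1).
+			 */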
+			index = (16 * (entry & 1)) + (8 * type);
+			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+			ivar &= ~(0xFF << index);
+			ivar |= (vector << index);
+			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+		}
+
+		break;
+
+	default:
+		break;
+	}
+}
+
+void ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+
+	/*
+	 * In order to allow Vector Rx there are a few configuration
+	 * conditions to be met and Rx Bulk Allocation should be allowed.
+	 */
+	if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+	    !adapter->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+				    "preconditions or RTE_IXGBE_INC_VECTOR is "
+				    "not enabled",
+			     dev->data->port_id);
+
+		adapter->rx_vec_allowed = false;
+	}
+
+	/*
+	 * Initialize the appropriate LRO callback.
+	 *
+	 * If all queues satisfy the bulk allocation preconditions
+	 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
+	 * Otherwise use a single allocation version.
+	 */
+	if (dev->data->lro) {
+		if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+					   "allocation version");
+			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+					   "allocation version");
+			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+		}
+	} else if (dev->data->scattered_rx) {
+		/*
+		 * Set the non-LRO scattered callback: there are Vector and
+		 * single allocation versions.
+		 */
+		if (adapter->rx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+					    "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+		} else if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+					   "allocation callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+		}
+	/*
+	 * Below we set "simple" callbacks according to port/queues parameters.
+	 * If parameters allow we are going to choose between the following
+	 * callbacks:
+	 *    - Vector
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (adapter->rx_vec_allowed) {
+		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
+				   "burst size no less than 32.");
+
+		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+	} else if (adapter->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+				    "satisfied, or Scattered Rx is requested, "
+				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+				    "is not enabled (port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = ixgbe_recv_pkts;
+	}
+}
+
+/**
+ * ixgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC related registers according to chapter 4.6.7.2
+ * of the 82599 Spec (x540 configuration is virtually the same).
+ *
+ * @dev port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+ixgbe_set_rsc(struct rte_eth_dev *dev)
+{
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_dev_info dev_info = { 0 };
+	bool rsc_capable = false;
+	uint16_t i;
+	uint32_t rdrxctl;
+
+	/* Sanity check */
+	dev->dev_ops->dev_infos_get(dev, &dev_info);
+	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+		rsc_capable = true;
+
+	if (!rsc_capable && rx_conf->enable_lro) {
+		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+				   "support it");
+		return -EINVAL;
+	}
+
+	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
+
+	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+		/*
+		 * According to chapter 4.6.7.2.1 of the Spec Rev. 3.0,
+		 * RSC configuration requires HW CRC stripping to be
+		 * enabled. If the user requested both HW CRC stripping
+		 * off and RSC on - return an error.
+		 */
+		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+				    "is disabled");
+		return -EINVAL;
+	}
+
+	/* RFCTL configuration  */
+	if (rsc_capable) {
+		uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+		if (rx_conf->enable_lro)
+			/*
+			 * Since NFS packets coalescing is not supported - clear
+			 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+			 * enabled.
+			 */
+			rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+				   IXGBE_RFCTL_NFSR_DIS);
+		else
+			rfctl |= IXGBE_RFCTL_RSC_DIS;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+	}
+
+	/* If LRO hasn't been requested - we are done here. */
+	if (!rx_conf->enable_lro)
+		return 0;
+
+	/* Set RDRXCTL.RSCACKC bit */
+	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+	/* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+		uint32_t srrctl =
+			IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
+		uint32_t rscctl =
+			IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
+		uint32_t psrtype =
+			IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
+		uint32_t eitr =
+			IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
+
+		/*
+		 * ixgbe PMD doesn't support header-split at the moment.
+		 *
+		 * Following the 4.6.7.2.1 chapter of the 82599/x540
+		 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
+		 * should be configured even if header split is not
+		 * enabled. We will configure it to 128 bytes, following
+		 * the recommendation in the spec.
+		 */
+		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+		srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+					    IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+		/*
+		 * TODO: Consider setting the Receive Descriptor Minimum
+		 * Threshold Size for an RSC case. This is not an obviously
+		 * beneficial option but one worth considering...
+		 */
+
+		rscctl |= IXGBE_RSCCTL_RSCEN;
+		rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
+		psrtype |= IXGBE_PSRTYPE_TCPHDR;
+
+		/*
+		 * RSC: Set ITR interval corresponding to 2K ints/s.
+		 *
+		 * Full-sized RSC aggregations for a 10Gb/s link will
+		 * arrive at about 20K aggregation/s rate.
+		 *
+		 * A 2K ints/s rate will cause only 10% of the
+		 * aggregations to be closed due to interrupt timer
+		 * expiration when streaming at wire speed.
+		 *
+		 * For a sparse streaming case this setting will yield
+		 * at most 500us latency for a single RSC aggregation.
+		 */
+		eitr &= ~IXGBE_EITR_ITR_INT_MASK;
+		eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+
+		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
+		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
+
+		/*
+		 * RSC requires the mapping of the queue to the
+		 * interrupt vector.
+		 */
+		ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
+	}
+
+	dev->data->lro = 1;
+
+	PMD_INIT_LOG(INFO, "enabling LRO mode");
+
+	return 0;
+}
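
To make the interrupt-rate figures in the per-queue comment concrete, the arithmetic below reproduces them; this is a back-of-the-envelope sketch assuming a ~64KB maximum RSC aggregation, not driver code:

	#include <stdio.h>

	int main(void)
	{
		double link_bps = 10e9;                  /* 10Gb/s link */
		double max_aggr_bytes = 64 * 1024;       /* assumed full-sized RSC aggregation */
		double aggr_per_sec = link_bps / 8 / max_aggr_bytes;  /* ~19K aggregations/s */
		double irq_per_sec = 1.0 / 500e-6;       /* 500us EITR interval -> 2K ints/s */

		printf("aggregations/s ~ %.0f, closed by timer ~ %.0f%%\n",
		       aggr_per_sec, 100.0 * irq_per_sec / aggr_per_sec);
		return 0;
	}
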
+
+/*
+ * Initializes Receive Unit.
+ */
+int
+ixgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_rx_queue *rxq;
+	uint64_t bus_addr;
+	uint32_t rxctrl;
+	uint32_t fctrl;
+	uint32_t hlreg0;
+	uint32_t maxfrs;
+	uint32_t srrctl;
+	uint32_t rdrxctl;
+	uint32_t rxcsum;
+	uint16_t buf_size;
+	uint16_t i;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Make sure receives are disabled while setting
+	 * up the RX context (registers, descriptor rings, etc.).
+	 */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+	/* Enable receipt of broadcasted frames */
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	fctrl |= IXGBE_FCTRL_BAM;
+	fctrl |= IXGBE_FCTRL_DPF;
+	fctrl |= IXGBE_FCTRL_PMCF;
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+	/*
+	 * Configure CRC stripping, if any.
+	 */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	if (rx_conf->hw_strip_crc)
+		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
+	else
+		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+
+	/*
+	 * Configure jumbo frame support, if any.
+	 */
+	if (rx_conf->jumbo_frame == 1) {
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+		maxfrs &= 0x0000FFFF;
+		maxfrs |= (rx_conf->max_rx_pkt_len << 16);
+		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+	} else
+		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+
+	/*
+	 * If loopback mode is configured for 82599, set LPBK bit.
+	 */
+	if (hw->mac.type == ixgbe_mac_82599EB &&
+			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+		hlreg0 |= IXGBE_HLREG0_LPBK;
+	else
+		hlreg0 &= ~IXGBE_HLREG0_LPBK;
+
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* Setup RX queues */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		/*
+		 * Reset crc_len in case it was changed after queue setup by a
+		 * call to configure.
+		 */
+		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+
+		/* Setup the Base and Length of the Rx Descriptor Rings */
+		bus_addr = rxq->rx_ring_phys_addr;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
+				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
+				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
+
+		/* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+		/*
+		 * Configure Header Split
+		 */
+		if (rx_conf->header_split) {
+			if (hw->mac.type == ixgbe_mac_82599EB) {
+				/* Must setup the PSRTYPE register */
+				uint32_t psrtype;
+				psrtype = IXGBE_PSRTYPE_TCPHDR |
+					IXGBE_PSRTYPE_UDPHDR   |
+					IXGBE_PSRTYPE_IPV4HDR  |
+					IXGBE_PSRTYPE_IPV6HDR;
+				IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+			}
+			srrctl = ((rx_conf->split_hdr_size <<
+				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+				IXGBE_SRRCTL_BSIZEHDR_MASK);
+			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		} else
+#endif
+			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		/* Set if packets are dropped when no descriptors available */
+		if (rxq->drop_en)
+			srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+		/*
+		 * Configure the RX buffer size in the BSIZEPACKET field of
+		 * the SRRCTL register of the queue.
+		 * The value is in 1 KB resolution. Valid values can be from
+		 * 1 KB to 16 KB.
+		 */
+		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+
+		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+		/* It adds dual VLAN length for supporting dual VLAN */
+		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
+			dev->data->scattered_rx = 1;
+	}
+
+	if (rx_conf->enable_scatter)
+		dev->data->scattered_rx = 1;
+
+	/*
+	 * Device configured with multiple RX queues.
+	 */
+	ixgbe_dev_mq_rx_configure(dev);
+
+	/*
+	 * Setup the Checksum Register.
+	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+	 * Enable IP/L4 checksum computation by hardware if requested to do so.
+	 */
+	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+	rxcsum |= IXGBE_RXCSUM_PCSD;
+	if (rx_conf->hw_ip_checksum)
+		rxcsum |= IXGBE_RXCSUM_IPPCSE;
+	else
+		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540) {
+		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+		if (rx_conf->hw_strip_crc)
+			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+		else
+			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+	}
+
+	rc = ixgbe_set_rsc(dev);
+	if (rc)
+		return rc;
+
+	ixgbe_set_rx_function(dev);
+
+	return 0;
+}
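
As an illustration of the buffer-size rounding and the scatter decision made in the queue loop above, the numbers below use a common 2KB mbuf data room; they are example values only, not taken from the driver:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t data_room = 2048 + 128;  /* example mempool data room */
		uint16_t headroom = 128;          /* typical RTE_PKTMBUF_HEADROOM */
		uint16_t buf_size = data_room - headroom;                 /* 2048 */
		uint16_t effective = (uint16_t)((buf_size >> 10) << 10);  /* BSIZEPACKET is in 1KB units */
		uint32_t max_rx_pkt_len = 1518;   /* standard Ethernet frame */

		/* Scattered Rx is forced when a frame plus two VLAN tags (8 bytes)
		 * does not fit into one rounded-down buffer. */
		printf("scattered_rx = %d\n", (max_rx_pkt_len + 8) > (uint32_t)effective);
		return 0;
	}
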
+
+/*
+ * Initializes Transmit Unit.
+ */
+void
+ixgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	uint64_t bus_addr;
+	uint32_t hlreg0;
+	uint32_t txctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Enable TX CRC (checksum offload requirement) and hw padding
+	 * (TSO requirement) */
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		bus_addr = txq->tx_ring_phys_addr;
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
+				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
+				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+
+		/*
+		 * Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		switch (hw->mac.type) {
+			case ixgbe_mac_82598EB:
+				txctrl = IXGBE_READ_REG(hw,
+							IXGBE_DCA_TXCTRL(txq->reg_idx));
+				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+						txctrl);
+				break;
+
+			case ixgbe_mac_82599EB:
+			case ixgbe_mac_X540:
+			case ixgbe_mac_X550:
+			case ixgbe_mac_X550EM_x:
+			default:
+				txctrl = IXGBE_READ_REG(hw,
+						IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
+				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+						txctrl);
+				break;
+		}
+	}
+
+	/* Device configured with multiple TX queues. */
+	ixgbe_dev_mq_tx_configure(dev);
+}
+
+/*
+ * Set up link for 82599 loopback mode Tx->Rx.
+ */
+static inline void
+ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
+				IXGBE_SUCCESS) {
+			PMD_INIT_LOG(ERR, "Could not enable loopback mode");
+			/* ignore error */
+			return;
+		}
+	}
+
+	/* Restart link */
+	IXGBE_WRITE_REG(hw,
+			IXGBE_AUTOC,
+			IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
+	ixgbe_reset_pipeline_82599(hw);
+
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+	msec_delay(50);
+}
+
+
+/*
+ * Start Transmit and Receive Units.
+ */
+int
+ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_rx_queue *rxq;
+	uint32_t txdctl;
+	uint32_t dmatxctl;
+	uint32_t rxctrl;
+	uint16_t i;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl |= txq->pthresh & 0x7F;
+		txdctl |= ((txq->hthresh & 0x7F) << 8);
+		txdctl |= ((txq->wthresh & 0x7F) << 16);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+	}
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+		dmatxctl |= IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq->tx_deferred_start) {
+			ret = ixgbe_dev_tx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq->rx_deferred_start) {
+			ret = ixgbe_dev_rx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Enable Receive engine */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		rxctrl |= IXGBE_RXCTRL_DMBYPS;
+	rxctrl |= IXGBE_RXCTRL_RXEN;
+	hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+	/* If loopback mode is enabled for 82599, set up the link accordingly */
+	if (hw->mac.type == ixgbe_mac_82599EB &&
+			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+		ixgbe_setup_loopback_link_82599(hw);
+
+	return 0;
+}
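
The TXDCTL writes in the first loop above pack the three per-queue thresholds into one register; a minimal sketch of that packing (field positions are those used in the code above):

	#include <stdint.h>

	/* Sketch of the TXDCTL threshold packing used above:
	 * PTHRESH in bits 0-6, HTHRESH in bits 8-14, WTHRESH in bits 16-22. */
	static uint32_t pack_txdctl(uint8_t pthresh, uint8_t hthresh, uint8_t wthresh)
	{
		return (uint32_t)(pthresh & 0x7F) |
		       ((uint32_t)(hthresh & 0x7F) << 8) |
		       ((uint32_t)(wthresh & 0x7F) << 16);
	}
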
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rx_queue_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rx_queue_id];
+
+		/* Allocate buffers for descriptor rings */
+		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+				     rx_queue_id);
+			return -1;
+		}
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+		/* Wait until RX Enable ready */
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_ms(1);
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
+				     rx_queue_id);
+		rte_wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	} else
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rx_queue_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rx_queue_id];
+
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+		/* Wait until RX Enable ready */
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_ms(1);
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
+				     rx_queue_id);
+
+		rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+		ixgbe_rx_queue_release_mbufs(rxq);
+		ixgbe_reset_rx_queue(adapter, rxq);
+	} else
+		return -1;
+
+	return 0;
+}
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[tx_queue_id];
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until TX Enable ready */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_ms(1);
+				txdctl = IXGBE_READ_REG(hw,
+					IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR, "Could not enable "
+					     "Tx Queue %d", tx_queue_id);
+		}
+		rte_wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+	} else
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[tx_queue_id];
+
+		/* Wait until TX queue is empty */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_us(RTE_IXGBE_WAIT_100_US);
+				txtdh = IXGBE_READ_REG(hw,
+						IXGBE_TDH(txq->reg_idx));
+				txtdt = IXGBE_READ_REG(hw,
+						IXGBE_TDT(txq->reg_idx));
+			} while (--poll_ms && (txtdh != txtdt));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+					     "when stopping.", tx_queue_id);
+		}
+
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl &= ~IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until TX Enable ready */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_ms(1);
+				txdctl = IXGBE_READ_REG(hw,
+						IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR, "Could not disable "
+					     "Tx Queue %d", tx_queue_id);
+		}
+
+		if (txq->ops != NULL) {
+			txq->ops->release_mbufs(txq);
+			txq->ops->reset(txq);
+		}
+	} else
+		return -1;
+
+	return 0;
+}
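
The queue start/stop paths above all repeat the same bounded poll on an enable bit; the helper below is a hypothetical generalisation of that pattern (it is not a driver API):

	/* Hypothetical helper: wait up to 'ms' milliseconds for the masked bits of
	 * a register to become set (want_set != 0) or cleared (want_set == 0).
	 * Returns 0 on success, -1 on timeout so the caller can log an error. */
	static int ixgbe_poll_reg(struct ixgbe_hw *hw, uint32_t reg, uint32_t mask,
				  int want_set, int ms)
	{
		uint32_t val;

		do {
			rte_delay_ms(1);
			val = IXGBE_READ_REG(hw, reg);
			if (!!(val & mask) == !!want_set)
				return 0;
		} while (--ms);

		return -1;
	}
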
+
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int
+ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_rx_queue *rxq;
+	uint64_t bus_addr;
+	uint32_t srrctl, psrtype = 0;
+	uint16_t buf_size;
+	uint16_t i;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
+			"it should be a power of 2");
+		return -1;
+	}
+
+	if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
+			"it should be equal to or less than %d",
+			hw->mac.max_rx_queues);
+		return -1;
+	}
+
+	/*
+	 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
+	 * disables VF receipt of packets if the PF MTU is > 1500.
+	 * This is done to deal with an 82599 limitation that forces
+	 * the PF and all VFs to share the same MTU.
+	 * Then, the PF driver re-enables VF receipt of packets when
+	 * the VF driver issues an IXGBE_VF_SET_LPE request.
+	 * In the meantime, the VF device cannot be used, even if the VF driver
+	 * and the Guest VM network stack are ready to accept packets with a
+	 * size up to the PF MTU.
+	 * As a work-around to this PF behaviour, force the call to
+	 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+	 * VF packet reception works in all cases.
+	 */
+	ixgbevf_rlpml_set_vf(hw,
+		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	/* Setup RX queues */
+	dev->rx_pkt_burst = ixgbe_recv_pkts;
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		/* Allocate buffers for descriptor rings */
+		ret = ixgbe_alloc_rx_queue_mbufs(rxq);
+		if (ret)
+			return ret;
+
+		/* Setup the Base and Length of the Rx Descriptor Rings */
+		bus_addr = rxq->rx_ring_phys_addr;
+
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+				(uint32_t)(bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+
+
+		/* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+		/*
+		 * Configure Header Split
+		 */
+		if (dev->data->dev_conf.rxmode.header_split) {
+			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+				IXGBE_SRRCTL_BSIZEHDR_MASK);
+			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		} else
+#endif
+			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		/* Set if packets are dropped when no descriptors available */
+		if (rxq->drop_en)
+			srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+		/*
+		 * Configure the RX buffer size in the BSIZEPACKET field of
+		 * the SRRCTL register of the queue.
+		 * The value is in 1 KB resolution. Valid values can be from
+		 * 1 KB to 16 KB.
+		 */
+		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+		/*
+		 * VF modification to write virtual function SRRCTL register
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
+
+		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+
+		if (dev->data->dev_conf.rxmode.enable_scatter ||
+		    /* It adds dual VLAN length for supporting dual VLAN */
+		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+			dev->data->scattered_rx = 1;
+#ifdef RTE_IXGBE_INC_VECTOR
+			if (rte_is_power_of_2(rxq->nb_rx_desc))
+				dev->rx_pkt_burst =
+					ixgbe_recv_scattered_pkts_vec;
+			else
+#endif
+				dev->rx_pkt_burst =
+					ixgbe_recv_pkts_lro_single_alloc;
+		}
+	}
+
+#ifdef RTE_HEADER_SPLIT_ENABLE
+	if (dev->data->dev_conf.rxmode.header_split)
+		/* Must setup the PSRTYPE register */
+		psrtype = IXGBE_PSRTYPE_TCPHDR |
+			IXGBE_PSRTYPE_UDPHDR   |
+			IXGBE_PSRTYPE_IPV4HDR  |
+			IXGBE_PSRTYPE_IPV6HDR;
+#endif
+
+	/* Set RQPL for VF RSS according to max Rx queue */
+	psrtype |= (dev->data->nb_rx_queues >> 1) <<
+		IXGBE_PSRTYPE_RQPL_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+	return 0;
+}
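
The final VFPSRTYPE write above encodes the number of RSS queue pairs in the RQPL field; the helper below is a hypothetical illustration of just that shift (e.g. 4 Rx queues yield a field value of 2):

	/* Hypothetical illustration of the RQPL encoding used above. */
	static uint32_t vf_psrtype_rqpl(uint16_t nb_rx_queues)
	{
		return (uint32_t)(nb_rx_queues >> 1) << IXGBE_PSRTYPE_RQPL_SHIFT;
	}
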
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void
+ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	uint64_t bus_addr;
+	uint32_t txctrl;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		bus_addr = txq->tx_ring_phys_addr;
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
+				(uint32_t)(bus_addr >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+
+		/*
+		 * Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		txctrl = IXGBE_READ_REG(hw,
+				IXGBE_VFDCA_TXCTRL(i));
+		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
+				txctrl);
+	}
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void
+ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw     *hw;
+	struct ixgbe_tx_queue *txq;
+	struct ixgbe_rx_queue *rxq;
+	uint32_t txdctl;
+	uint32_t rxdctl;
+	uint16_t i;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+		txdctl |= txq->pthresh & 0x7F;
+		txdctl |= ((txq->hthresh & 0x7F) << 8);
+		txdctl |= ((txq->wthresh & 0x7F) << 16);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+		poll_ms = 10;
+		/* Wait until TX Enable ready */
+		do {
+			rte_delay_ms(1);
+			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+
+		rxq = dev->data->rx_queues[i];
+
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+
+		/* Wait until RX Enable ready */
+		poll_ms = 10;
+		do {
+			rte_delay_ms(1);
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+		rte_wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
+
+	}
+}
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+	void __rte_unused *rx_queue,
+	struct rte_mbuf __rte_unused **rx_pkts,
+	uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+	void __rte_unused *rx_queue,
+	struct rte_mbuf __rte_unused **rx_pkts,
+	uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+	return -1;
+}
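
These weak definitions serve only as link-time placeholders: when ixgbe_rxtx_vec.c is built, its strong symbols replace them, and when the vector path is disabled the stubs keep the rest of the driver linking while reporting the path as unavailable. A minimal standalone illustration of the same technique (file and function names are hypothetical):

	/* defaults.c - overridable default */
	int __attribute__((weak)) vector_path_supported(void) { return 0; }

	/* vector.c - compiled only when the optional path is enabled; its strong
	 * definition silently replaces the weak one at link time. */
	int vector_path_supported(void) { return 1; }
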
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
new file mode 100644
index 0000000..af36438
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -0,0 +1,293 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_H_
+#define _IXGBE_RXTX_H_
+
+
+#define RTE_PMD_IXGBE_TX_MAX_BURST 32
+#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+#define RTE_IXGBE_DESCS_PER_LOOP           4
+#elif defined(RTE_IXGBE_INC_VECTOR)
+#define RTE_IXGBE_DESCS_PER_LOOP           4
+#else
+#define RTE_IXGBE_DESCS_PER_LOOP           1
+#endif
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+#ifdef RTE_IXGBE_INC_VECTOR
+#define RTE_IXGBE_VPMD_RX_BURST         32
+#define RTE_IXGBE_VPMD_TX_BURST         32
+#define RTE_IXGBE_RXQ_REARM_THRESH      RTE_IXGBE_VPMD_RX_BURST
+#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ    64
+#endif
+
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
+		    sizeof(union ixgbe_adv_rx_desc))
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p)  rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p)  do {} while(0)
+#endif
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_IXGBE_WAIT_100_US               100
+#define RTE_IXGBE_VMTXSW_REGISTER_COUNT     2
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct ixgbe_rx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+struct ixgbe_scattered_rx_entry {
+	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ixgbe_tx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct ixgbe_tx_entry_v {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct ixgbe_rx_queue {
+	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
+	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
+	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+	struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+	uint64_t            mbuf_initializer; /**< value to init mbufs */
+	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            rx_tail;  /**< current value of RDT register. */
+	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
+	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
+#endif
+	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+	uint16_t            queue_id; /**< RX queue index. */
+	uint16_t            reg_idx;  /**< RX queue register index. */
+	uint8_t             port_id;  /**< Device port identifier. */
+	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
+	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	uint8_t             rx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+	struct rte_mbuf fake_mbuf;
+	/** hold packets to return to application */
+	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
+#endif
+};
+
+/**
+ * IXGBE CTX Constants
+ */
+enum ixgbe_advctx_num {
+	IXGBE_CTX_0    = 0, /**< CTX0 */
+	IXGBE_CTX_1    = 1, /**< CTX1  */
+	IXGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
+};
+
+/** Offload features */
+union ixgbe_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+		uint64_t tso_segsz:16; /**< TCP TSO segment size */
+		uint64_t vlan_tci:16;
+		/**< VLAN Tag Control Identifier (CPU order). */
+	};
+};
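
A transmit path might fill this union from the mbuf metadata before comparing it against the cached contexts; the helper below is a minimal sketch under that assumption, not code from the driver:

	/* Sketch: gather the per-packet offload lengths that the context-descriptor
	 * logic compares against ctx_cache[]. */
	static union ixgbe_tx_offload tx_offload_from_mbuf(const struct rte_mbuf *m)
	{
		union ixgbe_tx_offload ofl;

		ofl.data = 0;
		ofl.l2_len = m->l2_len;        /* MAC header length */
		ofl.l3_len = m->l3_len;        /* IP header length */
		ofl.l4_len = m->l4_len;        /* TCP/UDP header length */
		ofl.tso_segsz = m->tso_segsz;  /* only meaningful for TSO packets */
		ofl.vlan_tci = m->vlan_tci;
		return ofl;
	}
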
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with ixgbe_vlan_macip.f layout.
+ * */
+#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
+/** MAC+IP  length. */
+#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure to check if a new context needs to be built
+ */
+
+struct ixgbe_advctx_info {
+	uint64_t flags;           /**< ol_flags for context build. */
+	/**< tx offload: vlan, tso, l2-l3-l4 lengths. */
+	union ixgbe_tx_offload tx_offload;
+	/** compare mask for tx offload. */
+	union ixgbe_tx_offload tx_offload_mask;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct ixgbe_tx_queue {
+	/** TX ring virtual address. */
+	volatile union ixgbe_adv_tx_desc *tx_ring;
+	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+	struct ixgbe_tx_entry *sw_ring;      /**< virtual address of SW ring. */
+	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t            tx_tail;       /**< current value of TDT reg. */
+	uint16_t            tx_free_thresh;/**< minimum TX before freeing. */
+	/** Number of TX descriptors to use before RS bit is set. */
+	uint16_t            tx_rs_thresh;
+	/** Number of TX descriptors used since RS bit was set. */
+	uint16_t            nb_tx_used;
+	/** Index to last TX descriptor to have been cleaned. */
+	uint16_t            last_desc_cleaned;
+	/** Total number of TX descriptors ready to be allocated. */
+	uint16_t            nb_tx_free;
+	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+	uint16_t tx_next_rs; /**< next desc to set RS bit */
+	uint16_t            queue_id;      /**< TX queue index. */
+	uint16_t            reg_idx;       /**< TX queue register index. */
+	uint8_t             port_id;       /**< Device port identifier. */
+	uint8_t             pthresh;       /**< Prefetch threshold register. */
+	uint8_t             hthresh;       /**< Host threshold register. */
+	uint8_t             wthresh;       /**< Write-back threshold reg. */
+	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint32_t            ctx_curr;      /**< Hardware context states. */
+	/** Hardware context0 history. */
+	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
+	const struct ixgbe_txq_ops *ops;       /**< txq ops */
+	uint8_t             tx_deferred_start; /**< not in global dev start. */
+};
+
+struct ixgbe_txq_ops {
+	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
+	void (*free_swring)(struct ixgbe_tx_queue *txq);
+	void (*reset)(struct ixgbe_tx_queue *txq);
+};
+
+/*
+ * The "simple" TX queue functions require that the following
+ * flags are set when the TX queue is configured:
+ *  - ETH_TXQ_FLAGS_NOMULTSEGS
+ *  - ETH_TXQ_FLAGS_NOVLANOFFL
+ *  - ETH_TXQ_FLAGS_NOXSUMSCTP
+ *  - ETH_TXQ_FLAGS_NOXSUMUDP
+ *  - ETH_TXQ_FLAGS_NOXSUMTCP
+ * and that the RS bit threshold (tx_rs_thresh) is at least equal to
+ * RTE_PMD_IXGBE_TX_MAX_BURST.
+ */
+#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+			    ETH_TXQ_FLAGS_NOOFFLOADS)
+
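
As a usage sketch, a TX queue configured as below at setup time would satisfy those requirements and let the PMD pick the simple (or vector) transmit path; the descriptor count and thresholds are illustrative values:

	static int setup_simple_txq(uint8_t port_id, uint16_t queue_id,
				    unsigned int socket_id)
	{
		struct rte_eth_txconf txconf = {
			.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				     ETH_TXQ_FLAGS_NOOFFLOADS,
			.tx_rs_thresh = 32,   /* >= RTE_PMD_IXGBE_TX_MAX_BURST */
			.tx_free_thresh = 32,
		};

		return rte_eth_tx_queue_setup(port_id, queue_id, 512,
					      socket_id, &txconf);
	}
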
+/*
+ * Populate descriptors with the following info:
+ * 1.) buffer_addr = phys_addr + headroom
+ * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
+ * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
+ */
+
+/* Defines for Tx descriptor */
+#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
+			 IXGBE_ADVTXD_DCMD_IFCS |\
+			 IXGBE_ADVTXD_DCMD_DEXT |\
+			 IXGBE_ADVTXD_DCMD_EOP)
+
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+
+/**
+ * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ *  - ixgbe_adapter.rx_bulk_alloc_allowed
+ *  - rte_eth_dev_data.scattered_rx
+ *  - rte_eth_dev_data.lro
+ *  - conditions checked in ixgbe_rx_vec_dev_conf_condition_check()
+ *
+ *  This means that the parameters above have to be configured prior to
+ *  calling this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void ixgbe_set_rx_function(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
+int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
+
+#ifdef RTE_IXGBE_INC_VECTOR
+
+uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+
+#endif /* RTE_IXGBE_INC_VECTOR */
+#endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
new file mode 100644
index 0000000..abd10f6
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -0,0 +1,792 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
+{
+	int i;
+	uint16_t rx_id;
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf *mb0, *mb1;
+	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+			RTE_PKTMBUF_HEADROOM);
+	__m128i dma_addr0, dma_addr1;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+	/* Pull 'n' more MBUFs into the software ring */
+	if (rte_mempool_get_bulk(rxq->mb_pool,
+				 (void *)rxep,
+				 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
+		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+		    rxq->nb_rx_desc) {
+			dma_addr0 = _mm_setzero_si128();
+			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+				rxep[i].mbuf = &rxq->fake_mbuf;
+				_mm_store_si128((__m128i *)&rxdp[i].read,
+						dma_addr0);
+			}
+		}
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			RTE_IXGBE_RXQ_REARM_THRESH;
+		return;
+	}
+
+	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
+	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+		__m128i vaddr0, vaddr1;
+		uintptr_t p0, p1;
+
+		mb0 = rxep[0].mbuf;
+		mb1 = rxep[1].mbuf;
+
+		/*
+		 * Flush mbuf with pkt template.
+		 * Data to be rearmed is 6 bytes long.
+		 * Though, RX will overwrite ol_flags that are coming next
+		 * anyway. So overwrite whole 8 bytes with one load:
+		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+		 */
+		p0 = (uintptr_t)&mb0->rearm_data;
+		*(uint64_t *)p0 = rxq->mbuf_initializer;
+		p1 = (uintptr_t)&mb1->rearm_data;
+		*(uint64_t *)p1 = rxq->mbuf_initializer;
+
+		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
+		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
+
+		/* convert pa to dma_addr hdr/data */
+		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+		/* add headroom to pa values */
+		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+		/* flush desc with pa dma_addr */
+		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+	}
+
+	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+	rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
+			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	/* Update the tail pointer on the NIC */
+	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when it is not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
+
+#define OLFLAGS_MASK     ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
+				     PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
+				     PKT_RX_IPV6_HDR_EXT))
+#define OLFLAGS_MASK_V   (((uint64_t)OLFLAGS_MASK << 48) | \
+			  ((uint64_t)OLFLAGS_MASK << 32) | \
+			  ((uint64_t)OLFLAGS_MASK << 16) | \
+			  ((uint64_t)OLFLAGS_MASK))
+#define PTYPE_SHIFT    (1)
+#define VTAG_SHIFT     (3)
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+	__m128i ptype0, ptype1, vtag0, vtag1;
+	union {
+		uint16_t e[4];
+		uint64_t dword;
+	} vol;
+
+	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
+	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
+	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+
+	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
+	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+
+	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
+	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
+
+	ptype1 = _mm_or_si128(ptype1, vtag1);
+	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;
+
+	rx_pkts[0]->ol_flags = vol.e[0];
+	rx_pkts[1]->ol_flags = vol.e[1];
+	rx_pkts[2]->ol_flags = vol.e[2];
+	rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+/*
+ * vPMD receive routine; only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST
+ * per call
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
+ * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ *   DD bits
+ * - doesn't support ol_flags for rss and csum err
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts, uint8_t *split_packet)
+{
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_entry *sw_ring;
+	uint16_t nb_pkts_recd;
+	int pos;
+	uint64_t var;
+	__m128i shuf_msk;
+	__m128i crc_adjust = _mm_set_epi16(
+				0, 0, 0, 0, /* ignore non-length fields */
+				0,          /* ignore high-16bits of pkt_len */
+				-rxq->crc_len, /* sub crc on pkt_len */
+				-rxq->crc_len, /* sub crc on data_len */
+				0            /* ignore pkt_type field */
+			);
+	__m128i dd_check, eop_check;
+
+	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
+		return 0;
+
+	/* Just the act of getting into the function from the application is
+	 * going to cost about 7 cycles */
+	rxdp = rxq->rx_ring + rxq->rx_tail;
+
+	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+	/* See if we need to rearm the RX queue - gives the prefetch a bit
+	 * of time to act */
+	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+		ixgbe_rxq_rearm(rxq);
+
+	/* Before we start moving massive data around, check to see if
+	 * there is actually a packet available */
+	if (!(rxdp->wb.upper.status_error &
+				rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		return 0;
+
+	/* 4 packets DD mask */
+	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+	/* 4 packets EOP mask */
+	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+	/* mask to shuffle from desc. to mbuf */
+	shuf_msk = _mm_set_epi8(
+		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
+		0xFF, 0xFF,  /* skip high 16 bits vlan_macip, zero out */
+		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
+		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
+		13, 12,      /* octet 12~13, low 16 bits pkt_len */
+		13, 12,      /* octet 12~13, 16 bits data_len */
+		0xFF, 0xFF   /* skip pkt_type field */
+		);
+
+	/* Cache is empty -> need to scan the buffer rings, but first move
+	 * the next 'n' mbufs into the cache */
+	sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+	/*
+	 * A. load 4 packet in one loop
+	 * B. copy 4 mbuf point from swring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info. from desc to mbuf
+	 */
+	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
+			pos += RTE_IXGBE_DESCS_PER_LOOP,
+			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
+		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+		__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+		if (split_packet) {
+			rte_prefetch0(&rx_pkts[pos]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+		}
+
+		/* B.1 load 1 mbuf point */
+		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+
+		/* Read desc statuses backwards to avoid race condition */
+		/* A.1 load 4 pkts desc */
+		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+		/* B.2 copy 2 mbuf point into rx_pkts  */
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+		/* B.1 load 1 mbuf point */
+		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+		/* B.1 load 2 mbuf point */
+		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+		/* B.2 copy 2 mbuf point into rx_pkts  */
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+		/* avoid compiler reorder optimization */
+		rte_compiler_barrier();
+
+		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
+		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+		/* set ol_flags with packet type and vlan tag */
+		desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+		/* C.2 get 4 pkts staterr value  */
+		zero = _mm_xor_si128(dd_check, dd_check);
+		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+		/* D.3 copy final 3,4 data to rx_pkts */
+		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+				pkt_mb4);
+		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+				pkt_mb3);
+
+		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+		/* C* extract and record EOP bit */
+		if (split_packet) {
+			__m128i eop_shuf_mask = _mm_set_epi8(
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0x04, 0x0C, 0x00, 0x08
+					);
+
+			/* and with mask to extract bits, flipping 1-0 */
+			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+			 * packet tracking, we do care, so shuffle. This also
+			 * compresses the 32-bit values to 8-bit */
+			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+			/* store the resulting 32-bit value */
+			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+
+			/* zero-out next pointers */
+			rx_pkts[pos]->next = NULL;
+			rx_pkts[pos + 1]->next = NULL;
+			rx_pkts[pos + 2]->next = NULL;
+			rx_pkts[pos + 3]->next = NULL;
+		}
+
+		/* C.3 calc available number of desc */
+		staterr = _mm_and_si128(staterr, dd_check);
+		staterr = _mm_packs_epi32(staterr, zero);
+
+		/* D.3 copy final 1,2 data to rx_pkts */
+		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+				pkt_mb2);
+		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+				pkt_mb1);
+
+		/* C.4 calc available number of desc */
+		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+		nb_pkts_recd += var;
+		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+			break;
+	}
+
+	/* Update our internal tail pointer */
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+	return nb_pkts_recd;
+}
+
+/*
+ * vPMD receive routine; only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST
+ * per call
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
+ * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ *   DD bits
+ * - doesn't support ol_flags for rss and csum err
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
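
Given the burst-size constraint above, an application polling this path would ask for exactly RTE_IXGBE_VPMD_RX_BURST packets per call; a minimal sketch (the packet processing is a placeholder):

	static void rx_poll_loop(uint8_t port_id, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST];
		uint16_t nb, i;

		for (;;) {
			nb = rte_eth_rx_burst(port_id, queue_id, pkts,
					      RTE_IXGBE_VPMD_RX_BURST);
			for (i = 0; i < nb; i++)
				rte_pktmbuf_free(pkts[i]);  /* placeholder processing */
		}
	}
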
+
+static inline uint16_t
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+		uint16_t nb_bufs, uint8_t *split_flags)
+{
+	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
+	struct rte_mbuf *start = rxq->pkt_first_seg;
+	struct rte_mbuf *end =  rxq->pkt_last_seg;
+	unsigned pkt_idx, buf_idx;
+
+
+	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+		if (end != NULL) {
+			/* processing a split packet */
+			end->next = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+			start->nb_segs++;
+			start->pkt_len += rx_bufs[buf_idx]->data_len;
+			end = end->next;
+
+			if (!split_flags[buf_idx]) {
+				/* it's the last packet of the set */
+				start->hash = end->hash;
+				start->ol_flags = end->ol_flags;
+				/* we need to strip crc for the whole packet */
+				start->pkt_len -= rxq->crc_len;
+				if (end->data_len > rxq->crc_len)
+					end->data_len -= rxq->crc_len;
+				else {
+					/* free up last mbuf */
+					struct rte_mbuf *secondlast = start;
+					while (secondlast->next != end)
+						secondlast = secondlast->next;
+					secondlast->data_len -= (rxq->crc_len -
+							end->data_len);
+					secondlast->next = NULL;
+					rte_pktmbuf_free_seg(end);
+					end = secondlast;
+				}
+				pkts[pkt_idx++] = start;
+				start = end = NULL;
+			}
+		} else {
+			/* not processing a split packet */
+			if (!split_flags[buf_idx]) {
+				/* not a split packet, save and skip */
+				pkts[pkt_idx++] = rx_bufs[buf_idx];
+				continue;
+			}
+			end = start = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+		}
+	}
+
+	/* save the partial packet for next time */
+	rxq->pkt_first_seg = start;
+	rxq->pkt_last_seg = end;
+	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+	return pkt_idx;
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - don't support ol_flags for rss and csum err
+ * - only accepts nb_pkts == RTE_IXGBE_VPMD_RX_BURST
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct ixgbe_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+			split_flags);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint32_t *split_fl32 = (uint32_t *)split_flags;
+	if (rxq->pkt_first_seg == NULL &&
+			split_fl32[0] == 0 && split_fl32[1] == 0 &&
+			split_fl32[2] == 0 && split_fl32[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly*/
+	unsigned i = 0;
+	if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble then*/
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+	}
+	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+		&split_flags[i]);
+}
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+		struct rte_mbuf *pkt, uint64_t flags)
+{
+	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
+			flags | pkt->data_len,
+			pkt->buf_physaddr + pkt->data_off);
+	_mm_store_si128((__m128i *)&txdp->read, descriptor);
+}
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+		struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+{
+	int i;
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		vtx1(txdp, *pkt, flags);
+}
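
For reference, the 64-bit value built by vtx1() overlays the descriptor's cmd_type_len (low 32 bits) and olinfo_status (high 32 bits): data_len and DCMD_DTYP_FLAGS land in the low word, while the shift by 46 places pkt_len in the PAYLEN field (bit 14 of olinfo_status). A scalar sketch of the same packing, equivalent to what the SSE store writes:

	/* Scalar equivalent of the high quadword assembled in vtx1(). */
	static uint64_t vtx1_cmd_word(uint16_t data_len, uint32_t pkt_len, uint64_t flags)
	{
		return ((uint64_t)pkt_len << 46) | flags | data_len;
	}
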
+
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+	struct ixgbe_tx_entry_v *txep;
+	uint32_t status;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+	/* check DD bit on threshold descriptor */
+	status = txq->tx_ring[txq->tx_next_dd].wb.status;
+	if (!(status & IXGBE_ADVTXD_STAT_DD))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/*
+	 * first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1)
+	 */
+	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
+			(n - 1)];
+	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m != NULL)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool))
+					free[nb_free++] = m;
+				else {
+					rte_mempool_put_bulk(free[0]->pool,
+							(void *)free, nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
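+/* Record the mbufs being transmitted in the SW ring so they can be freed later. */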
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
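+/*
+ * Vector Tx burst: bursts that would run past the end of the descriptor ring
+ * are split at the wrap point, and the RS bit is set periodically so that
+ * completed descriptors can later be reclaimed by ixgbe_tx_free_bufs().
+ */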
+uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+	volatile union ixgbe_adv_tx_desc *txdp;
+	struct ixgbe_tx_entry_v *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = DCMD_DTYP_FLAGS;
+	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
+	int i;
+
+	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
+		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ixgbe_tx_free_bufs(txq);
+
+	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+
+		tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			vtx1(txdp, *tx_pkts, flags);
+
+		vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &(txq->tx_ring[tx_id]);
+		txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
+	}
+
+	tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+			txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+	return nb_pkts;
+}
+
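+/* Free any mbufs still held in the SW ring when the Tx queue is released. */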
+static void
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+{
+	unsigned i;
+	struct ixgbe_tx_entry_v *txe;
+	uint16_t nb_free, max_desc;
+
+	if (txq->sw_ring != NULL) {
+		/* release the used mbufs in sw_ring */
+		nb_free = txq->nb_tx_free;
+		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+		     nb_free < max_desc && i != txq->tx_tail;
+		     i = (i + 1) & max_desc) {
+			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
+			if (txe->mbuf != NULL)
+				rte_pktmbuf_free_seg(txe->mbuf);
+		}
+		/* reset tx_entry */
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
+			txe->mbuf = NULL;
+		}
+	}
+}
+
+static void
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+	if (txq == NULL)
+		return;
+
+	if (txq->sw_ring != NULL) {
+		rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
+		txq->sw_ring = NULL;
+	}
+}
+
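+/*
+ * Return the Tx queue to its initial state: zero the HW ring, mark every
+ * descriptor as done (DD) and reset the SW ring entries and queue indexes.
+ */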
+static void
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+	struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
+	uint16_t i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->tx_ring[i] = zeroed_desc;
+
+	/* Initialize SW ring entries */
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txe[i].mbuf = NULL;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	memset((void *)&txq->ctx_cache, 0,
+		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static const struct ixgbe_txq_ops vec_txq_ops = {
+	.release_mbufs = ixgbe_tx_queue_release_mbufs,
+	.free_swring = ixgbe_tx_free_swring,
+	.reset = ixgbe_reset_tx_queue,
+};
+
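+/*
+ * Pre-compute the 64-bit rearm_data template (refcnt, nb_segs, port and
+ * data_off) that the vector Rx path uses to re-initialize mbufs.
+ */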
+int
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
+{
+	uintptr_t p;
+	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+	mb_def.nb_segs = 1;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.port = rxq->port_id;
+	rte_mbuf_refcnt_set(&mb_def, 1);
+
+	/* prevent compiler reordering: rearm_data covers previous fields */
+	rte_compiler_barrier();
+	p = (uintptr_t)&mb_def.rearm_data;
+	rxq->mbuf_initializer = *(uint64_t *)p;
+	return 0;
+}
+
+int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+{
+	if (txq->sw_ring == NULL)
+		return -1;
+
+	/* leave the first one for overflow */
+	txq->sw_ring = (struct ixgbe_tx_entry *)
+		((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
+	txq->ops = &vec_txq_ops;
+
+	return 0;
+}
+
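+/*
+ * Check whether the device configuration is compatible with the vector Rx
+ * path; return -1 when an unsupported feature (VLAN offload without ol_flags
+ * support, fdir, checksum offload, header split, or IEEE1588) is requested.
+ */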
+int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
+	/* without Rx ol_flags, no VLAN flag report */
+	if (rxmode->hw_vlan_strip != 0 ||
+	    rxmode->hw_vlan_extend != 0)
+		return -1;
+#endif
+
+	/* no fdir support */
+	if (fconf->mode != RTE_FDIR_MODE_NONE)
+		return -1;
+
+	/*
+	 * - no csum error report support
+	 * - no header split support
+	 */
+	if (rxmode->hw_ip_checksum == 1 ||
+	    rxmode->header_split == 1)
+		return -1;
+
+	return 0;
+#else
+	RTE_SET_USED(dev);
+	return -1;
+#endif
+}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
new file mode 100644
index 0000000..ef35398
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -0,0 +1,4 @@ 
+DPDK_2.0 {
+
+	local: *;
+};
diff --git a/lib/Makefile b/lib/Makefile
index 73e0bd1..ffd7870 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,7 +41,6 @@  DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
 DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
 DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
 DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
-DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += librte_pmd_ixgbe
 DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += librte_pmd_mlx4
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += librte_pmd_ring
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += librte_pmd_pcap
diff --git a/lib/librte_pmd_ixgbe/Makefile b/lib/librte_pmd_ixgbe/Makefile
deleted file mode 100644
index e0f8916..0000000
--- a/lib/librte_pmd_ixgbe/Makefile
+++ /dev/null
@@ -1,126 +0,0 @@ 
-#   BSD LICENSE
-#
-#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Intel Corporation nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-#
-# library name
-#
-LIB = librte_pmd_ixgbe.a
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-
-EXPORT_MAP := rte_pmd_ixgbe_version.map
-
-LIBABIVER := 1
-
-ifeq ($(CC), icc)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
-
-else ifeq ($(CC), clang)
-#
-# CFLAGS for clang
-#
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-else
-#
-# CFLAGS for gcc
-#
-ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
-CFLAGS     += -Wno-deprecated
-CFLAGS_ixgbe_common.o += -Wno-unused-but-set-variable
-CFLAGS_ixgbe_x550.o += -Wno-unused-but-set-variable
-endif
-CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
-
-ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
-CFLAGS_ixgbe_x550.o += -Wno-maybe-uninitialized
-endif
-
-ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
-CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
-endif
-
-endif
-
-#
-# Add extra flags for base driver files (also known as shared code)
-# to disable warnings in them
-#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/ixgbe/*.c)))
-$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
-
-VPATH += $(SRCDIR)/ixgbe
-
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
-SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
-
-ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
-SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
-endif
-
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc
-
-ifeq ($(CONFIG_RTE_IXGBE_INC_VECTOR)$(CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC),yn)
-$(error The ixgbe vpmd depends on Rx bulk alloc)
-endif
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pmd_ixgbe/ixgbe/README b/lib/librte_pmd_ixgbe/ixgbe/README
deleted file mode 100644
index ba1249b..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/README
+++ /dev/null
@@ -1,67 +0,0 @@ 
-..
-     BSD LICENSE
-   
-     Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-     All rights reserved.
-   
-     Redistribution and use in source and binary forms, with or without
-     modification, are permitted provided that the following conditions
-     are met:
-   
-       * Redistributions of source code must retain the above copyright
-         notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above copyright
-         notice, this list of conditions and the following disclaimer in
-         the documentation and/or other materials provided with the
-         distribution.
-       * Neither the name of Intel Corporation nor the names of its
-         contributors may be used to endorse or promote products derived
-         from this software without specific prior written permission.
-   
-     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Intel® IXGBE driver
-===================
-
-This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2015.02.03 released by LAD. The sub-directory of lad/
-contains the original source package.
-This driver is valid for the product(s) listed below
-
-* Intel® 10 Gigabit AF DA Dual Port Server Adapter
-* Intel® 10 Gigabit AT Server Adapter
-* Intel® 10 Gigabit AT2 Server Adapter
-* Intel® 10 Gigabit CX4 Dual Port Server Adapter
-* Intel® 10 Gigabit XF LR Server Adapter
-* Intel® 10 Gigabit XF SR Dual Port Server Adapter
-* Intel® 10 Gigabit XF SR Server Adapter
-* Intel® 82598 10 Gigabit Ethernet Controller
-* Intel® 82599 10 Gigabit Ethernet Controller
-* Intel® Ethernet Controller X540-AT2
-* Intel® Ethernet Server Adapter X520 Series
-* Intel® Ethernet Server Adapter X520-T2
-* Intel® Ethernet Controller X550-BT2
-
-Updating driver
-===============
-
-The following modifications have been made to this code to integrate it with the
-Intel® DPDK:
-
-
-ixgbe_osdep.h
--------------
-
-The OS dependency layer has been extensively modified to support the drivers in
-the Intel® DPDK environment. It is expected that these files will not need to be
-changed on updating the driver.
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
deleted file mode 100644
index 4e06550..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
+++ /dev/null
@@ -1,1435 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_82598.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-#define IXGBE_82598_MAX_TX_QUEUES 32
-#define IXGBE_82598_MAX_RX_QUEUES 64
-#define IXGBE_82598_RAR_ENTRIES   16
-#define IXGBE_82598_MC_TBL_SIZE  128
-#define IXGBE_82598_VFT_TBL_SIZE 128
-#define IXGBE_82598_RX_PB_SIZE   512
-
-STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-					     ixgbe_link_speed *speed,
-					     bool *autoneg);
-STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-				      bool autoneg_wait_to_complete);
-STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-				      ixgbe_link_speed *speed, bool *link_up,
-				      bool link_up_wait_to_complete);
-STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
-				      ixgbe_link_speed speed,
-				      bool autoneg_wait_to_complete);
-STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-					 ixgbe_link_speed speed,
-					 bool autoneg_wait_to_complete);
-STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
-				  u32 headroom, int strategy);
-STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
-					u8 *sff8472_data);
-/**
- *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
- *  @hw: pointer to the HW structure
- *
- *  The defaults for 82598 should be in the range of 50us to 50ms,
- *  however the hardware default for these parts is 500us to 1ms which is less
- *  than the 10ms recommended by the pci-e spec.  To address this we need to
- *  increase the value to either 10ms to 250ms for capability version 1 config,
- *  or 16ms to 55ms for version 2.
- **/
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
-{
-	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
-	u16 pcie_devctl2;
-
-	/* only take action if timeout value is defaulted to 0 */
-	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
-		goto out;
-
-	/*
-	 * if capababilities version is type 1 we can write the
-	 * timeout of 10ms to 250ms through the GCR register
-	 */
-	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
-		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
-		goto out;
-	}
-
-	/*
-	 * for version 2 capabilities we need to write the config space
-	 * directly in order to set the completion timeout value for
-	 * 16ms to 55ms
-	 */
-	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
-	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
-	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
-out:
-	/* disable completion timeout resend */
-	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
-	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
-}
-
-/**
- *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers and assign the MAC type for 82598.
- *  Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_init_ops_82598");
-
-	ret_val = ixgbe_init_phy_ops_generic(hw);
-	ret_val = ixgbe_init_ops_generic(hw);
-
-	/* PHY */
-	phy->ops.init = ixgbe_init_phy_ops_82598;
-
-	/* MAC */
-	mac->ops.start_hw = ixgbe_start_hw_82598;
-	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
-	mac->ops.reset_hw = ixgbe_reset_hw_82598;
-	mac->ops.get_media_type = ixgbe_get_media_type_82598;
-	mac->ops.get_supported_physical_layer =
-				ixgbe_get_supported_physical_layer_82598;
-	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
-	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
-	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
-	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
-
-	/* RAR, Multicast, VLAN */
-	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
-	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
-	mac->ops.set_vfta = ixgbe_set_vfta_82598;
-	mac->ops.set_vlvf = NULL;
-	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
-
-	/* Flow Control */
-	mac->ops.fc_enable = ixgbe_fc_enable_82598;
-
-	mac->mcft_size		= IXGBE_82598_MC_TBL_SIZE;
-	mac->vft_size		= IXGBE_82598_VFT_TBL_SIZE;
-	mac->num_rar_entries	= IXGBE_82598_RAR_ENTRIES;
-	mac->rx_pb_size		= IXGBE_82598_RX_PB_SIZE;
-	mac->max_rx_queues	= IXGBE_82598_MAX_RX_QUEUES;
-	mac->max_tx_queues	= IXGBE_82598_MAX_TX_QUEUES;
-	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
-
-	/* SFP+ Module */
-	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
-	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
-
-	/* Link */
-	mac->ops.check_link = ixgbe_check_mac_link_82598;
-	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
-	mac->ops.flap_tx_laser = NULL;
-	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
-	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
-
-	/* Manageability interface */
-	mac->ops.set_fw_drv_ver = NULL;
-
-	mac->ops.get_rtrup2tc = NULL;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
- *  @hw: pointer to hardware structure
- *
- *  Initialize any function pointers that were not able to be
- *  set during init_shared_code because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val = IXGBE_SUCCESS;
-	u16 list_offset, data_offset;
-
-	DEBUGFUNC("ixgbe_init_phy_ops_82598");
-
-	/* Identify the PHY */
-	phy->ops.identify(hw);
-
-	/* Overwrite the link function pointers if copper PHY */
-	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
-		mac->ops.get_link_capabilities =
-				ixgbe_get_copper_link_capabilities_generic;
-	}
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
-		phy->ops.check_link = ixgbe_check_phy_link_tnx;
-		phy->ops.get_firmware_version =
-					ixgbe_get_phy_firmware_version_tnx;
-		break;
-	case ixgbe_phy_nl:
-		phy->ops.reset = ixgbe_reset_phy_nl;
-
-		/* Call SFP+ identify routine to get the SFP+ module type */
-		ret_val = phy->ops.identify_sfp(hw);
-		if (ret_val != IXGBE_SUCCESS)
-			goto out;
-		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
-			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
-
-		/* Check to see if SFP+ module is supported */
-		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-							      &list_offset,
-							      &data_offset);
-		if (ret_val != IXGBE_SUCCESS) {
-			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
-		break;
-	default:
-		break;
-	}
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware using the generic start_hw function.
- *  Disables relaxed ordering Then set pcie completion timeout
- *
- **/
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
-{
-	u32 regval;
-	u32 i;
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_start_hw_82598");
-
-	ret_val = ixgbe_start_hw_generic(hw);
-
-	/* Disable relaxed ordering */
-	for (i = 0; ((i < hw->mac.max_tx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-	}
-
-	for (i = 0; ((i < hw->mac.max_rx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-
-	/* set the completion timeout for interface */
-	if (ret_val == IXGBE_SUCCESS)
-		ixgbe_set_pcie_completion_timeout(hw);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
-					     ixgbe_link_speed *speed,
-					     bool *autoneg)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 autoc = 0;
-
-	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
-
-	/*
-	 * Determine link capabilities based on the stored value of AUTOC,
-	 * which represents EEPROM defaults.  If AUTOC value has not been
-	 * stored, use the current register value.
-	 */
-	if (hw->mac.orig_link_settings_stored)
-		autoc = hw->mac.orig_autoc;
-	else
-		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = false;
-		break;
-
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		*autoneg = false;
-		break;
-
-	case IXGBE_AUTOC_LMS_1G_AN:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		break;
-
-	case IXGBE_AUTOC_LMS_KX4_AN:
-	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
-		*speed = IXGBE_LINK_SPEED_UNKNOWN;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		break;
-
-	default:
-		status = IXGBE_ERR_LINK_SETUP;
-		break;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_get_media_type_82598 - Determines media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- **/
-STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
-{
-	enum ixgbe_media_type media_type;
-
-	DEBUGFUNC("ixgbe_get_media_type_82598");
-
-	/* Detect if there is a copper PHY attached. */
-	switch (hw->phy.type) {
-	case ixgbe_phy_cu_unknown:
-	case ixgbe_phy_tn:
-		media_type = ixgbe_media_type_copper;
-		goto out;
-	default:
-		break;
-	}
-
-	/* Media type for I82598 is based on device ID */
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82598:
-	case IXGBE_DEV_ID_82598_BX:
-		/* Default device ID is mezzanine card KX/KX4 */
-		media_type = ixgbe_media_type_backplane;
-		break;
-	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-	case IXGBE_DEV_ID_82598EB_XF_LR:
-	case IXGBE_DEV_ID_82598EB_SFP_LOM:
-		media_type = ixgbe_media_type_fiber;
-		break;
-	case IXGBE_DEV_ID_82598EB_CX4:
-	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-		media_type = ixgbe_media_type_cx4;
-		break;
-	case IXGBE_DEV_ID_82598AT:
-	case IXGBE_DEV_ID_82598AT2:
-		media_type = ixgbe_media_type_copper;
-		break;
-	default:
-		media_type = ixgbe_media_type_unknown;
-		break;
-	}
-out:
-	return media_type;
-}
-
-/**
- *  ixgbe_fc_enable_82598 - Enable flow control
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to the current settings.
- **/
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 fctrl_reg;
-	u32 rmcs_reg;
-	u32 reg;
-	u32 fcrtl, fcrth;
-	u32 link_speed = 0;
-	int i;
-	bool link_up;
-
-	DEBUGFUNC("ixgbe_fc_enable_82598");
-
-	/* Validate the water mark configuration */
-	if (!hw->fc.pause_time) {
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/* Low water mark of zero causes XOFF floods */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
-		    hw->fc.high_water[i]) {
-			if (!hw->fc.low_water[i] ||
-			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
-				DEBUGOUT("Invalid water mark configuration\n");
-				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-				goto out;
-			}
-		}
-	}
-
-	/*
-	 * On 82598 having Rx FC on causes resets while doing 1G
-	 * so if it's on turn it off once we know link_speed. For
-	 * more details see 82598 Specification update.
-	 */
-	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
-		switch (hw->fc.requested_mode) {
-		case ixgbe_fc_full:
-			hw->fc.requested_mode = ixgbe_fc_tx_pause;
-			break;
-		case ixgbe_fc_rx_pause:
-			hw->fc.requested_mode = ixgbe_fc_none;
-			break;
-		default:
-			/* no change */
-			break;
-		}
-	}
-
-	/* Negotiate the fc mode to use */
-	ixgbe_fc_autoneg(hw);
-
-	/* Disable any previous flow control settings */
-	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
-
-	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
-
-	/*
-	 * The possible values of fc.current_mode are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames,
-	 *    but not send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but
-	 *     we do not support receiving pause frames).
-	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-	 * other: Invalid.
-	 */
-	switch (hw->fc.current_mode) {
-	case ixgbe_fc_none:
-		/*
-		 * Flow control is disabled by software override or autoneg.
-		 * The code below will actually disable it in the HW.
-		 */
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		fctrl_reg |= IXGBE_FCTRL_RFCE;
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		fctrl_reg |= IXGBE_FCTRL_RFCE;
-		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
-		break;
-	default:
-		DEBUGOUT("Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-		break;
-	}
-
-	/* Set 802.3x based flow control settings. */
-	fctrl_reg |= IXGBE_FCTRL_DPF;
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
-	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
-
-	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
-		    hw->fc.high_water[i]) {
-			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
-			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
-		}
-
-	}
-
-	/* Configure pause time (2 TCs per register) */
-	reg = hw->fc.pause_time * 0x00010001;
-	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_start_mac_link_82598 - Configures MAC link settings
- *  @hw: pointer to hardware structure
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link.  Performs autonegotiation if needed.
- **/
-STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-				      bool autoneg_wait_to_complete)
-{
-	u32 autoc_reg;
-	u32 links_reg;
-	u32 i;
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_start_mac_link_82598");
-
-	/* Restart link */
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
-	/* Only poll for autoneg to complete if specified to do so */
-	if (autoneg_wait_to_complete) {
-		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-		     IXGBE_AUTOC_LMS_KX4_AN ||
-		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
-			links_reg = 0; /* Just in case Autoneg time = 0 */
-			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
-				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
-					break;
-				msec_delay(100);
-			}
-			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
-				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-				DEBUGOUT("Autonegotiation did not complete.\n");
-			}
-		}
-	}
-
-	/* Add delay to filter out noises during initial link setup */
-	msec_delay(50);
-
-	return status;
-}
-
-/**
- *  ixgbe_validate_link_ready - Function looks for phy link
- *  @hw: pointer to hardware structure
- *
- *  Function indicates success when phy link is available. If phy is not ready
- *  within 5 seconds of MAC indicating link, the function returns error.
- **/
-STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
-{
-	u32 timeout;
-	u16 an_reg;
-
-	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
-		return IXGBE_SUCCESS;
-
-	for (timeout = 0;
-	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
-
-		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
-		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
-			break;
-
-		msec_delay(100);
-	}
-
-	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
-		DEBUGOUT("Link was indicated but link is down\n");
-		return IXGBE_ERR_LINK_SETUP;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_check_mac_link_82598 - Get link/speed status
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @link_up: true is link is up, false otherwise
- *  @link_up_wait_to_complete: bool used to wait for link up or not
- *
- *  Reads the links register to determine if link is up and the current speed
- **/
-STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
-				      ixgbe_link_speed *speed, bool *link_up,
-				      bool link_up_wait_to_complete)
-{
-	u32 links_reg;
-	u32 i;
-	u16 link_reg, adapt_comp_reg;
-
-	DEBUGFUNC("ixgbe_check_mac_link_82598");
-
-	/*
-	 * SERDES PHY requires us to read link status from undocumented
-	 * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
-	 * indicates link down.  OxC00C is read to check that the XAUI lanes
-	 * are active.  Bit 0 clear indicates active; set indicates inactive.
-	 */
-	if (hw->phy.type == ixgbe_phy_nl) {
-		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
-		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
-		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
-				     &adapt_comp_reg);
-		if (link_up_wait_to_complete) {
-			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
-				if ((link_reg & 1) &&
-				    ((adapt_comp_reg & 1) == 0)) {
-					*link_up = true;
-					break;
-				} else {
-					*link_up = false;
-				}
-				msec_delay(100);
-				hw->phy.ops.read_reg(hw, 0xC79F,
-						     IXGBE_TWINAX_DEV,
-						     &link_reg);
-				hw->phy.ops.read_reg(hw, 0xC00C,
-						     IXGBE_TWINAX_DEV,
-						     &adapt_comp_reg);
-			}
-		} else {
-			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
-				*link_up = true;
-			else
-				*link_up = false;
-		}
-
-		if (*link_up == false)
-			goto out;
-	}
-
-	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if (link_up_wait_to_complete) {
-		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
-			if (links_reg & IXGBE_LINKS_UP) {
-				*link_up = true;
-				break;
-			} else {
-				*link_up = false;
-			}
-			msec_delay(100);
-			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-		}
-	} else {
-		if (links_reg & IXGBE_LINKS_UP)
-			*link_up = true;
-		else
-			*link_up = false;
-	}
-
-	if (links_reg & IXGBE_LINKS_SPEED)
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-	else
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-
-	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
-	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
-		*link_up = false;
-
-out:
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_setup_mac_link_82598 - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
-				      ixgbe_link_speed speed,
-				      bool autoneg_wait_to_complete)
-{
-	bool autoneg = false;
-	s32 status = IXGBE_SUCCESS;
-	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 autoc = curr_autoc;
-	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-
-	DEBUGFUNC("ixgbe_setup_mac_link_82598");
-
-	/* Check to see if speed passed in is supported. */
-	ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-	speed &= link_capabilities;
-
-	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
-		status = IXGBE_ERR_LINK_SETUP;
-
-	/* Set KX4/KX support according to speed requested */
-	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
-		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
-		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-			autoc |= IXGBE_AUTOC_KX4_SUPP;
-		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-			autoc |= IXGBE_AUTOC_KX_SUPP;
-		if (autoc != curr_autoc)
-			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
-	}
-
-	if (status == IXGBE_SUCCESS) {
-		/*
-		 * Setup and restart the link based on the new values in
-		 * ixgbe_hw This will write the AUTOC register based on the new
-		 * stored values
-		 */
-		status = ixgbe_start_mac_link_82598(hw,
-						    autoneg_wait_to_complete);
-	}
-
-	return status;
-}
-
-
-/**
- *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true if waiting is needed to complete
- *
- *  Sets the link speed in the AUTOC register in the MAC and restarts link.
- **/
-STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
-					 ixgbe_link_speed speed,
-					 bool autoneg_wait_to_complete)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_setup_copper_link_82598");
-
-	/* Setup the PHY according to input speed */
-	status = hw->phy.ops.setup_link_speed(hw, speed,
-					      autoneg_wait_to_complete);
-	/* Set up MAC */
-	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
-
-	return status;
-}
-
-/**
- *  ixgbe_reset_hw_82598 - Performs hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks and
- *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
- *  reset.
- **/
-STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	s32 phy_status = IXGBE_SUCCESS;
-	u32 ctrl;
-	u32 gheccr;
-	u32 i;
-	u32 autoc;
-	u8  analog_val;
-
-	DEBUGFUNC("ixgbe_reset_hw_82598");
-
-	/* Call adapter stop to disable tx/rx and clear interrupts */
-	status = hw->mac.ops.stop_adapter(hw);
-	if (status != IXGBE_SUCCESS)
-		goto reset_hw_out;
-
-	/*
-	 * Power up the Atlas Tx lanes if they are currently powered down.
-	 * Atlas Tx lanes are powered down for MAC loopback tests, but
-	 * they are not automatically restored on reset.
-	 */
-	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
-	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
-		/* Enable Tx Atlas so packets can be transmitted again */
-		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-					     &analog_val);
-		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
-		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
-					      analog_val);
-
-		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-					     &analog_val);
-		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
-		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
-					      analog_val);
-
-		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-					     &analog_val);
-		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
-		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
-					      analog_val);
-
-		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-					     &analog_val);
-		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
-		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
-					      analog_val);
-	}
-
-	/* Reset PHY */
-	if (hw->phy.reset_disable == false) {
-		/* PHY ops must be identified and initialized prior to reset */
-
-		/* Init PHY and function pointers, perform SFP setup */
-		phy_status = hw->phy.ops.init(hw);
-		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-			goto reset_hw_out;
-		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
-			goto mac_reset_top;
-
-		hw->phy.ops.reset(hw);
-	}
-
-mac_reset_top:
-	/*
-	 * Issue global reset to the MAC.  This needs to be a SW reset.
-	 * If link reset is used, it might reset the MAC when mng is using it
-	 */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Poll for reset bit to self-clear indicating reset is complete */
-	for (i = 0; i < 10; i++) {
-		usec_delay(1);
-		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST))
-			break;
-	}
-	if (ctrl & IXGBE_CTRL_RST) {
-		status = IXGBE_ERR_RESET_FAILED;
-		DEBUGOUT("Reset polling failed to complete.\n");
-	}
-
-	msec_delay(50);
-
-	/*
-	 * Double resets are required for recovery from certain error
-	 * conditions.  Between resets, it is necessary to stall to allow time
-	 * for any pending HW events to complete.
-	 */
-	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
-		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		goto mac_reset_top;
-	}
-
-	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
-	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
-	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
-
-	/*
-	 * Store the original AUTOC value if it has not been
-	 * stored off yet.  Otherwise restore the stored original
-	 * AUTOC value since the reset operation sets back to deaults.
-	 */
-	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	if (hw->mac.orig_link_settings_stored == false) {
-		hw->mac.orig_autoc = autoc;
-		hw->mac.orig_link_settings_stored = true;
-	} else if (autoc != hw->mac.orig_autoc) {
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
-	}
-
-	/* Store the permanent mac address */
-	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
-	/*
-	 * Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table
-	 */
-	hw->mac.ops.init_rx_addrs(hw);
-
-reset_hw_out:
-	if (phy_status != IXGBE_SUCCESS)
-		status = phy_status;
-
-	return status;
-}
-
-/**
- *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to associate with a VMDq index
- *  @vmdq: VMDq set index
- **/
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 rar_high;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_set_vmdq_82598");
-
-	/* Make sure we are using a valid rar index range */
-	if (rar >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", rar);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-	rar_high &= ~IXGBE_RAH_VIND_MASK;
-	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
-	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to associate with a VMDq index
- *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
- **/
-STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 rar_high;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	UNREFERENCED_1PARAMETER(vmdq);
-
-	/* Make sure we are using a valid rar index range */
-	if (rar >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", rar);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-	if (rar_high & IXGBE_RAH_VIND_MASK) {
-		rar_high &= ~IXGBE_RAH_VIND_MASK;
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_vfta_82598 - Set VLAN filter table
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VFTA
- *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
- *
- *  Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-			 bool vlan_on)
-{
-	u32 regindex;
-	u32 bitindex;
-	u32 bits;
-	u32 vftabyte;
-
-	DEBUGFUNC("ixgbe_set_vfta_82598");
-
-	if (vlan > 4095)
-		return IXGBE_ERR_PARAM;
-
-	/* Determine 32-bit word position in array */
-	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
-
-	/* Determine the location of the (VMD) queue index */
-	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
-	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
-
-	/* Set the nibble for VMD queue index */
-	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
-	bits &= (~(0x0F << bitindex));
-	bits |= (vind << bitindex);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
-
-	/* Determine the location of the bit for this VLAN id */
-	bitindex = vlan & 0x1F;   /* lower five bits */
-
-	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-	if (vlan_on)
-		/* Turn on this VLAN id */
-		bits |= (1 << bitindex);
-	else
-		/* Turn off this VLAN id */
-		bits &= ~(1 << bitindex);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
- *  @hw: pointer to hardware structure
- *
- *  Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
-{
-	u32 offset;
-	u32 vlanbyte;
-
-	DEBUGFUNC("ixgbe_clear_vfta_82598");
-
-	for (offset = 0; offset < hw->mac.vft_size; offset++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
-	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
-		for (offset = 0; offset < hw->mac.vft_size; offset++)
-			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
-					0);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to read
- *  @val: read value
- *
- *  Performs read operation to Atlas analog register specified.
- **/
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
-	u32  atlas_ctl;
-
-	DEBUGFUNC("ixgbe_read_analog_reg8_82598");
-
-	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
-			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(10);
-	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
-	*val = (u8)atlas_ctl;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
- *  @hw: pointer to hardware structure
- *  @reg: atlas register to write
- *  @val: value to write
- *
- *  Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
-	u32  atlas_ctl;
-
-	DEBUGFUNC("ixgbe_write_analog_reg8_82598");
-
-	atlas_ctl = (reg << 8) | val;
-	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(10);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
- *  @hw: pointer to hardware structure
- *  @dev_addr: address to read from
- *  @byte_offset: byte offset to read from dev_addr
- *  @eeprom_data: value read
- *
- *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
- **/
-STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
-				    u8 byte_offset, u8 *eeprom_data)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 sfp_addr = 0;
-	u16 sfp_data = 0;
-	u16 sfp_stat = 0;
-	u16 gssr;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
-
-	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-		gssr = IXGBE_GSSR_PHY1_SM;
-	else
-		gssr = IXGBE_GSSR_PHY0_SM;
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
-		return IXGBE_ERR_SWFW_SYNC;
-
-	if (hw->phy.type == ixgbe_phy_nl) {
-		/*
-		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
-		 * 0xC30D. These registers are used to talk to the SFP+
-		 * module's EEPROM through the SDA/SCL (I2C) interface.
-		 */
-		sfp_addr = (dev_addr << 8) + byte_offset;
-		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
-		hw->phy.ops.write_reg_mdi(hw,
-					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					  sfp_addr);
-
-		/* Poll status */
-		for (i = 0; i < 100; i++) {
-			hw->phy.ops.read_reg_mdi(hw,
-						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-						&sfp_stat);
-			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
-			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
-				break;
-			msec_delay(10);
-		}
-
-		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
-			DEBUGOUT("EEPROM read did not pass.\n");
-			status = IXGBE_ERR_SFP_NOT_PRESENT;
-			goto out;
-		}
-
-		/* Read data */
-		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
-
-		*eeprom_data = (u8)(sfp_data >> 8);
-	} else {
-		status = IXGBE_ERR_PHY;
-	}
-
-out:
-	hw->mac.ops.release_swfw_sync(hw, gssr);
-	return status;
-}
-
-/**
- *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
- *  @eeprom_data: value read
- *
- *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 *eeprom_data)
-{
-	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
-					byte_offset, eeprom_data);
-}
-
-/**
- *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset at address 0xA2
- *  @eeprom_data: value read
- *
- *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
- **/
-STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
-					u8 *sff8472_data)
-{
-	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
-					byte_offset, sff8472_data);
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-	u16 ext_ability = 0;
-
-	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
-
-	hw->phy.ops.identify(hw);
-
-	/* Copper PHY must be checked before AUTOC LMS to determine correct
-	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-	case ixgbe_phy_cu_unknown:
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
-		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-		goto out;
-	default:
-		break;
-	}
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_AN:
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		else
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-		break;
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		else /* XAUI */
-			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-		break;
-	case IXGBE_AUTOC_LMS_KX4_AN:
-	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		break;
-	default:
-		break;
-	}
-
-	if (hw->phy.type == ixgbe_phy_nl) {
-		hw->phy.ops.identify_sfp(hw);
-
-		switch (hw->phy.sfp_type) {
-		case ixgbe_sfp_type_da_cu:
-			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-			break;
-		case ixgbe_sfp_type_sr:
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-			break;
-		case ixgbe_sfp_type_lr:
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-			break;
-		default:
-			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-			break;
-		}
-	}
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-		break;
-	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		break;
-	case IXGBE_DEV_ID_82598EB_XF_LR:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		break;
-	default:
-		break;
-	}
-
-out:
-	return physical_layer;
-}
-
-/**
- *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
- *  port devices.
- *  @hw: pointer to the HW structure
- *
- *  Calls common function and corrects issue with some single port devices
- *  that enable LAN1 but not LAN0.
- **/
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
-{
-	struct ixgbe_bus_info *bus = &hw->bus;
-	u16 pci_gen = 0;
-	u16 pci_ctrl2 = 0;
-
-	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
-
-	ixgbe_set_lan_id_multi_port_pcie(hw);
-
-	/* check if LAN0 is disabled */
-	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
-	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
-
-		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
-
-		/* if LAN0 is completely disabled force function to 0 */
-		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
-		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
-		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
-
-			bus->func = 0;
-		}
-	}
-}
-
-/**
- *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
- *  @hw: pointer to hardware structure
- *
- **/
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
-{
-	u32 regval;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
-
-	/* Enable relaxed ordering */
-	for (i = 0; ((i < hw->mac.max_tx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-	}
-
-	for (i = 0; ((i < hw->mac.max_rx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-
-}
-
-/**
- * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
-				  u32 headroom, int strategy)
-{
-	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
-	u8 i = 0;
-	UNREFERENCED_1PARAMETER(headroom);
-
-	if (!num_pb)
-		return;
-
-	/* Setup Rx packet buffer sizes */
-	switch (strategy) {
-	case PBA_STRATEGY_WEIGHTED:
-		/* Setup the first four at 80KB */
-		rxpktsize = IXGBE_RXPBSIZE_80KB;
-		for (; i < 4; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-		/* Setup the last four at 48KB...don't re-init i */
-		rxpktsize = IXGBE_RXPBSIZE_48KB;
-		/* Fall Through */
-	case PBA_STRATEGY_EQUAL:
-	default:
-		/* Divide the remaining Rx packet buffer evenly among the TCs */
-		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-		break;
-	}
-
-	/* Setup Tx packet buffer sizes */
-	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-}
-
-/**
- *  ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
- *  @hw: pointer to hardware structure
- *  @regval: register value to write to RXCTRL
- *
- *  Enables the Rx DMA unit
- **/
-s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
-{
-	DEBUGFUNC("ixgbe_enable_rx_dma_82598");
-
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
-
-	return IXGBE_SUCCESS;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
deleted file mode 100644
index 4000486..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.h
+++ /dev/null
@@ -1,52 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_82598_H_
-#define _IXGBE_82598_H_
-
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
-#endif /* _IXGBE_82598_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
deleted file mode 100644
index 239b833..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
+++ /dev/null
@@ -1,2713 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_82599.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-#define IXGBE_82599_MAX_TX_QUEUES 128
-#define IXGBE_82599_MAX_RX_QUEUES 128
-#define IXGBE_82599_RAR_ENTRIES   128
-#define IXGBE_82599_MC_TBL_SIZE   128
-#define IXGBE_82599_VFT_TBL_SIZE  128
-#define IXGBE_82599_RX_PB_SIZE	  512
-
-STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-					 ixgbe_link_speed speed,
-					 bool autoneg_wait_to_complete);
-STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
-				   u16 offset, u16 *data);
-STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
-					  u16 words, u16 *data);
-STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
-					u8 dev_addr, u8 *data);
-STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
-					u8 dev_addr, u8 data);
-
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-
-	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
-
-	/*
-	 * Enable the laser control functions for SFP+ fiber
-	 * when MNG is not enabled
-	 */
-	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	    !ixgbe_mng_enabled(hw)) {
-		mac->ops.disable_tx_laser =
-				       ixgbe_disable_tx_laser_multispeed_fiber;
-		mac->ops.enable_tx_laser =
-					ixgbe_enable_tx_laser_multispeed_fiber;
-		mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber;
-
-	} else {
-		mac->ops.disable_tx_laser = NULL;
-		mac->ops.enable_tx_laser = NULL;
-		mac->ops.flap_tx_laser = NULL;
-	}
-
-	if (hw->phy.multispeed_fiber) {
-		/* Set up dual speed SFP+ support */
-		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
-	} else {
-		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
-		     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
-		      hw->phy.smart_speed == ixgbe_smart_speed_on) &&
-		      !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-			mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed;
-		} else {
-			mac->ops.setup_link = ixgbe_setup_mac_link_82599;
-		}
-	}
-}
-
-/**
- *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
- *  @hw: pointer to hardware structure
- *
- *  Initialize any function pointers that were not able to be
- *  set during init_shared_code because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- *
- **/
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 esdp;
-
-	DEBUGFUNC("ixgbe_init_phy_ops_82599");
-
-	if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
-		/* Store flag indicating I2C bus access control unit. */
-		hw->phy.qsfp_shared_i2c_bus = TRUE;
-
-		/* Initialize access to QSFP+ I2C bus */
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		esdp |= IXGBE_ESDP_SDP0_DIR;
-		esdp &= ~IXGBE_ESDP_SDP1_DIR;
-		esdp &= ~IXGBE_ESDP_SDP0;
-		esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
-		esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-
-		phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599;
-		phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599;
-	}
-	/* Identify the PHY or SFP module */
-	ret_val = phy->ops.identify(hw);
-	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto init_phy_ops_out;
-
-	/* Setup function pointers based on detected SFP module and speeds */
-	ixgbe_init_mac_link_ops_82599(hw);
-	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
-		hw->phy.ops.reset = NULL;
-
-	/* If copper media, overwrite with copper function pointers */
-	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-		mac->ops.setup_link = ixgbe_setup_copper_link_82599;
-		mac->ops.get_link_capabilities =
-				  ixgbe_get_copper_link_capabilities_generic;
-	}
-
-	/* Set necessary function pointers based on PHY type */
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
-		phy->ops.check_link = ixgbe_check_phy_link_tnx;
-		phy->ops.get_firmware_version =
-			     ixgbe_get_phy_firmware_version_tnx;
-		break;
-	default:
-		break;
-	}
-init_phy_ops_out:
-	return ret_val;
-}
-
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-	u16 list_offset, data_offset, data_value;
-
-	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
-
-	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
-		ixgbe_init_mac_link_ops_82599(hw);
-
-		hw->phy.ops.reset = NULL;
-
-		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-							      &data_offset);
-		if (ret_val != IXGBE_SUCCESS)
-			goto setup_sfp_out;
-
-		/* PHY config will finish before releasing the semaphore */
-		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-							IXGBE_GSSR_MAC_CSR_SM);
-		if (ret_val != IXGBE_SUCCESS) {
-			ret_val = IXGBE_ERR_SWFW_SYNC;
-			goto setup_sfp_out;
-		}
-
-		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
-			goto setup_sfp_err;
-		while (data_value != 0xffff) {
-			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
-			IXGBE_WRITE_FLUSH(hw);
-			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
-				goto setup_sfp_err;
-		}
-
-		/* Release the semaphore */
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-		/* Delay obtaining semaphore again to allow FW access;
-		 * prot_autoc_write uses the semaphore too.
-		 */
-		msec_delay(hw->eeprom.semaphore_delay);
-
-		/* Restart DSP and set SFI mode */
-		ret_val = hw->mac.ops.prot_autoc_write(hw,
-			hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
-			false);
-
-		if (ret_val) {
-			DEBUGOUT("sfp module setup not complete\n");
-			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
-			goto setup_sfp_out;
-		}
-
-	}
-
-setup_sfp_out:
-	return ret_val;
-
-setup_sfp_err:
-	/* Release the semaphore */
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-	/* Delay obtaining semaphore again to allow FW access */
-	msec_delay(hw->eeprom.semaphore_delay);
-	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-		      "eeprom read at offset %d failed", data_offset);
-	return IXGBE_ERR_PHY;
-}
-
-/**
- *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
- *  @hw: pointer to hardware structure
- *  @locked: Return whether we locked for this read.
- *  @reg_val: Value we read from AUTOC
- *
- *  For this part (82599) we need to wrap read-modify-writes with a possible
- *  FW/SW lock.  It is assumed this lock will be freed with the next
- *  prot_autoc_write_82599().
- */
-s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
-{
-	s32 ret_val;
-
-	*locked = false;
-	 /* If LESM is on then we need to hold the SW/FW semaphore. */
-	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-					IXGBE_GSSR_MAC_CSR_SM);
-		if (ret_val != IXGBE_SUCCESS)
-			return IXGBE_ERR_SWFW_SYNC;
-
-		*locked = true;
-	}
-
-	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	return IXGBE_SUCCESS;
-}
-
-/**
- * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
- * @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
- * @locked: bool to indicate whether the SW/FW lock was already taken by
- *           previous prot_autoc_read_82599.
- *
- * This part (82599) may need to hold the SW/FW lock around all writes to
- * AUTOC. Likewise after a write we need to do a pipeline reset.
- */
-s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-
-	/* Blocked by MNG FW so bail */
-	if (ixgbe_check_reset_blocked(hw))
-		goto out;
-
-	/* We only need to get the lock if:
-	 *  - We didn't do it already (in the read part of a read-modify-write)
-	 *  - LESM is enabled.
-	 */
-	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-					IXGBE_GSSR_MAC_CSR_SM);
-		if (ret_val != IXGBE_SUCCESS)
-			return IXGBE_ERR_SWFW_SYNC;
-
-		locked = true;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
-	ret_val = ixgbe_reset_pipeline_82599(hw);
-
-out:
-	/* Free the SW/FW semaphore as we either grabbed it here or
-	 * already had it when this function was called.
-	 */
-	if (locked)
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers and assign the MAC type for 82599.
- *  Does not touch the hardware.
- **/
-
-s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_init_ops_82599");
-
-	ixgbe_init_phy_ops_generic(hw);
-	ret_val = ixgbe_init_ops_generic(hw);
-
-	/* PHY */
-	phy->ops.identify = ixgbe_identify_phy_82599;
-	phy->ops.init = ixgbe_init_phy_ops_82599;
-
-	/* MAC */
-	mac->ops.reset_hw = ixgbe_reset_hw_82599;
-	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
-	mac->ops.get_media_type = ixgbe_get_media_type_82599;
-	mac->ops.get_supported_physical_layer =
-				    ixgbe_get_supported_physical_layer_82599;
-	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
-	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
-	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
-	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
-	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
-	mac->ops.start_hw = ixgbe_start_hw_82599;
-	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
-	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
-	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
-	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
-	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
-	mac->ops.prot_autoc_read = prot_autoc_read_82599;
-	mac->ops.prot_autoc_write = prot_autoc_write_82599;
-
-	/* RAR, Multicast, VLAN */
-	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
-	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
-	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
-	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
-	mac->rar_highwater = 1;
-	mac->ops.set_vfta = ixgbe_set_vfta_generic;
-	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
-	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
-	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
-	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599;
-	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
-	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
-
-	/* Link */
-	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599;
-	mac->ops.check_link = ixgbe_check_mac_link_generic;
-	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
-	ixgbe_init_mac_link_ops_82599(hw);
-
-	mac->mcft_size		= IXGBE_82599_MC_TBL_SIZE;
-	mac->vft_size		= IXGBE_82599_VFT_TBL_SIZE;
-	mac->num_rar_entries	= IXGBE_82599_RAR_ENTRIES;
-	mac->rx_pb_size		= IXGBE_82599_RX_PB_SIZE;
-	mac->max_rx_queues	= IXGBE_82599_MAX_RX_QUEUES;
-	mac->max_tx_queues	= IXGBE_82599_MAX_TX_QUEUES;
-	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
-
-	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
-				   IXGBE_FWSM_MODE_MASK) ? true : false;
-
-	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
-
-	/* EEPROM */
-	eeprom->ops.read = ixgbe_read_eeprom_82599;
-	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;
-
-	/* Manageability interface */
-	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
-
-	mac->ops.get_thermal_sensor_data =
-					 ixgbe_get_thermal_sensor_data_generic;
-	mac->ops.init_thermal_sensor_thresh =
-				      ixgbe_init_thermal_sensor_thresh_generic;
-
-	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: true when autoneg or autotry is enabled
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-				      ixgbe_link_speed *speed,
-				      bool *autoneg)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 autoc = 0;
-
-	DEBUGFUNC("ixgbe_get_link_capabilities_82599");
-
-
-	/* Check if 1G SFP module. */
-	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		goto out;
-	}
-
-	/*
-	 * Determine link capabilities based on the stored value of AUTOC,
-	 * which represents EEPROM defaults.  If AUTOC value has not
-	 * been stored, use the current register values.
-	 */
-	if (hw->mac.orig_link_settings_stored)
-		autoc = hw->mac.orig_autoc;
-	else
-		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = false;
-		break;
-
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		*autoneg = false;
-		break;
-
-	case IXGBE_AUTOC_LMS_1G_AN:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		break;
-
-	case IXGBE_AUTOC_LMS_10G_SERIAL:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		*autoneg = false;
-		break;
-
-	case IXGBE_AUTOC_LMS_KX4_KX_KR:
-	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-		*speed = IXGBE_LINK_SPEED_UNKNOWN;
-		if (autoc & IXGBE_AUTOC_KR_SUPP)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		break;
-
-	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
-		*speed = IXGBE_LINK_SPEED_100_FULL;
-		if (autoc & IXGBE_AUTOC_KR_SUPP)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-		break;
-
-	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
-		*autoneg = false;
-		break;
-
-	default:
-		status = IXGBE_ERR_LINK_SETUP;
-		goto out;
-		break;
-	}
-
-	if (hw->phy.multispeed_fiber) {
-		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
-			  IXGBE_LINK_SPEED_1GB_FULL;
-
-		/* QSFP must not enable full auto-negotiation
-		 * Limited autoneg is enabled at 1G
-		 */
-		if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
-			*autoneg = false;
-		else
-			*autoneg = true;
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_get_media_type_82599 - Get media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
-{
-	enum ixgbe_media_type media_type;
-
-	DEBUGFUNC("ixgbe_get_media_type_82599");
-
-	/* Detect if there is a copper PHY attached. */
-	switch (hw->phy.type) {
-	case ixgbe_phy_cu_unknown:
-	case ixgbe_phy_tn:
-		media_type = ixgbe_media_type_copper;
-		goto out;
-	default:
-		break;
-	}
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82599_KX4:
-	case IXGBE_DEV_ID_82599_KX4_MEZZ:
-	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-	case IXGBE_DEV_ID_82599_KR:
-	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
-	case IXGBE_DEV_ID_82599_XAUI_LOM:
-		/* Default device ID is mezzanine card KX/KX4 */
-		media_type = ixgbe_media_type_backplane;
-		break;
-	case IXGBE_DEV_ID_82599_SFP:
-	case IXGBE_DEV_ID_82599_SFP_FCOE:
-	case IXGBE_DEV_ID_82599_SFP_EM:
-	case IXGBE_DEV_ID_82599_SFP_SF2:
-	case IXGBE_DEV_ID_82599_SFP_SF_QP:
-	case IXGBE_DEV_ID_82599EN_SFP:
-		media_type = ixgbe_media_type_fiber;
-		break;
-	case IXGBE_DEV_ID_82599_CX4:
-		media_type = ixgbe_media_type_cx4;
-		break;
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		media_type = ixgbe_media_type_copper;
-		break;
-	case IXGBE_DEV_ID_82599_LS:
-		media_type = ixgbe_media_type_fiber_lco;
-		break;
-	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
-		media_type = ixgbe_media_type_fiber_qsfp;
-		break;
-	default:
-		media_type = ixgbe_media_type_unknown;
-		break;
-	}
-out:
-	return media_type;
-}
-
-/**
- *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
- *  @hw: pointer to hardware structure
- *
- *  Disables link during D3 power down sequence.
- *
- **/
-void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
-{
-	u32 autoc2_reg;
-	u16 ee_ctrl_2 = 0;
-
-	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
-	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
-
-	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
-	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
-		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
-	}
-}
-
-/**
- *  ixgbe_start_mac_link_82599 - Setup MAC link settings
- *  @hw: pointer to hardware structure
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link.  Performs autonegotiation if needed.
- **/
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-			       bool autoneg_wait_to_complete)
-{
-	u32 autoc_reg;
-	u32 links_reg;
-	u32 i;
-	s32 status = IXGBE_SUCCESS;
-	bool got_lock = false;
-
-	DEBUGFUNC("ixgbe_start_mac_link_82599");
-
-
-	/*  reset_pipeline requires us to hold this lock as it writes to
-	 *  AUTOC.
-	 */
-	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-		status = hw->mac.ops.acquire_swfw_sync(hw,
-						       IXGBE_GSSR_MAC_CSR_SM);
-		if (status != IXGBE_SUCCESS)
-			goto out;
-
-		got_lock = true;
-	}
-
-	/* Restart link */
-	ixgbe_reset_pipeline_82599(hw);
-
-	if (got_lock)
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-
-	/* Only poll for autoneg to complete if specified to do so */
-	if (autoneg_wait_to_complete) {
-		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
-		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-		     IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
-		     IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-			links_reg = 0; /* Just in case Autoneg time = 0 */
-			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
-				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
-					break;
-				msec_delay(100);
-			}
-			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
-				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-				DEBUGOUT("Autoneg did not complete.\n");
-			}
-		}
-	}
-
-	/* Add delay to filter out noise during initial link setup */
-	msec_delay(50);
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
- *  @hw: pointer to hardware structure
- *
- *  The base drivers may require better control over SFP+ module
- *  PHY states.  This includes selectively shutting down the Tx
- *  laser on the PHY, effectively halting physical link.
- **/
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
-	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
-	/* Blocked by MNG FW so bail */
-	if (ixgbe_check_reset_blocked(hw))
-		return;
-
-	/* Disable Tx laser; allow 100us to go dark per spec */
-	esdp_reg |= IXGBE_ESDP_SDP3;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(100);
-}
-
-/**
- *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
- *  @hw: pointer to hardware structure
- *
- *  The base drivers may require better control over SFP+ module
- *  PHY states.  This includes selectively turning on the Tx
- *  laser on the PHY, effectively starting physical link.
- **/
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
-	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
-	/* Enable Tx laser; allow 100ms to light up */
-	esdp_reg &= ~IXGBE_ESDP_SDP3;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-	IXGBE_WRITE_FLUSH(hw);
-	msec_delay(100);
-}
-
-/**
- *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
- *  @hw: pointer to hardware structure
- *
- *  When the driver changes the link speeds that it can support,
- *  it sets autotry_restart to true to indicate that we need to
- *  initiate a new autotry session with the link partner.  To do
- *  so, we set the speed then disable and re-enable the Tx laser, to
- *  alert the link partner that it also needs to restart autotry on its
- *  end.  This is consistent with true clause 37 autoneg, which also
- *  involves a loss of signal.
- **/
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
-{
-	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
-
-	/* Blocked by MNG FW so bail */
-	if (ixgbe_check_reset_blocked(hw))
-		return;
-
-	if (hw->mac.autotry_restart) {
-		ixgbe_disable_tx_laser_multispeed_fiber(hw);
-		ixgbe_enable_tx_laser_multispeed_fiber(hw);
-		hw->mac.autotry_restart = false;
-	}
-}
-
-
-/**
- *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-				     ixgbe_link_speed speed,
-				     bool autoneg_wait_to_complete)
-{
-	s32 status = IXGBE_SUCCESS;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	u32 speedcnt = 0;
-	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-	u32 i = 0;
-	bool autoneg, link_up = false;
-
-	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
-
-	/* Mask off requested but non-supported speeds */
-	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	speed &= link_speed;
-
-	/*
-	 * Try each speed one by one, highest priority first.  We do this in
-	 * software because 10gb fiber doesn't support speed autonegotiation.
-	 */
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-		speedcnt++;
-		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
-			goto out;
-
-		/* Set the module link speed */
-		switch (hw->phy.media_type) {
-		case ixgbe_media_type_fiber:
-			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			IXGBE_WRITE_FLUSH(hw);
-			break;
-		case ixgbe_media_type_fiber_qsfp:
-			/* QSFP module automatically detects MAC link speed */
-			break;
-		default:
-			DEBUGOUT("Unexpected media type.\n");
-			break;
-		}
-
-		/* Allow module to change analog characteristics (1G->10G) */
-		msec_delay(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_10GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		/* Flap the tx laser if it has not already been done */
-		ixgbe_flap_tx_laser(hw);
-
-		/*
-		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
-		 * Section 73.10.2, we may have to wait up to 500ms if KR is
-		 * attempted.  82599 uses the same timing for 10g SFI.
-		 */
-		for (i = 0; i < 5; i++) {
-			/* Wait for the link partner to also set speed */
-			msec_delay(100);
-
-			/* If we have link, just jump out */
-			status = ixgbe_check_link(hw, &link_speed,
-						  &link_up, false);
-			if (status != IXGBE_SUCCESS)
-				return status;
-
-			if (link_up)
-				goto out;
-		}
-	}
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-		speedcnt++;
-		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
-			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
-			goto out;
-
-		/* Set the module link speed */
-		switch (hw->phy.media_type) {
-		case ixgbe_media_type_fiber:
-			esdp_reg &= ~IXGBE_ESDP_SDP5;
-			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			IXGBE_WRITE_FLUSH(hw);
-			break;
-		case ixgbe_media_type_fiber_qsfp:
-			/* QSFP module automatically detects link speed */
-			break;
-		default:
-			DEBUGOUT("Unexpected media type.\n");
-			break;
-		}
-
-		/* Allow module to change analog characteristics (10G->1G) */
-		msec_delay(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_1GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		/* Flap the Tx laser if it has not already been done */
-		ixgbe_flap_tx_laser(hw);
-
-		/* Wait for the link partner to also set speed */
-		msec_delay(100);
-
-		/* If we have link, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if (link_up)
-			goto out;
-	}
-
-	/*
-	 * We didn't get link.  Configure back to the highest speed we tried,
-	 * (if there was more than one).  We call ourselves back with just the
-	 * single highest speed that the user requested.
-	 */
-	if (speedcnt > 1)
-		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-			highest_link_speed, autoneg_wait_to_complete);
-
-out:
-	/* Set autoneg_advertised value based on input link speed */
-	hw->phy.autoneg_advertised = 0;
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Implements the Intel SmartSpeed algorithm.
- **/
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-				    ixgbe_link_speed speed,
-				    bool autoneg_wait_to_complete)
-{
-	s32 status = IXGBE_SUCCESS;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	s32 i, j;
-	bool link_up = false;
-	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
-	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
-
-	 /* Set autoneg_advertised value based on input link speed */
-	hw->phy.autoneg_advertised = 0;
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_100_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
-
-	/*
-	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
-	 * autoneg advertisement if link is unable to be established at the
-	 * highest negotiated rate.  This can sometimes happen due to integrity
-	 * issues with the physical media connection.
-	 */
-
-	/* First, try to get link with full advertisement */
-	hw->phy.smart_speed_active = false;
-	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
-		status = ixgbe_setup_mac_link_82599(hw, speed,
-						    autoneg_wait_to_complete);
-		if (status != IXGBE_SUCCESS)
-			goto out;
-
-		/*
-		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
-		 * Section 73.10.2, we may have to wait up to 500ms if KR is
-		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
-		 * Table 9 in the AN MAS.
-		 */
-		for (i = 0; i < 5; i++) {
-			msec_delay(100);
-
-			/* If we have link, just jump out */
-			status = ixgbe_check_link(hw, &link_speed, &link_up,
-						  false);
-			if (status != IXGBE_SUCCESS)
-				goto out;
-
-			if (link_up)
-				goto out;
-		}
-	}
-
-	/*
-	 * We didn't get link.  If we advertised KR plus one of KX4/KX
-	 * (or BX4/BX), then disable KR and try again.
-	 */
-	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
-	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
-		goto out;
-
-	/* Turn SmartSpeed on to disable KR support */
-	hw->phy.smart_speed_active = true;
-	status = ixgbe_setup_mac_link_82599(hw, speed,
-					    autoneg_wait_to_complete);
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	/*
-	 * Wait for the controller to acquire link.  600ms will allow for
-	 * the AN link_fail_inhibit_timer as well for multiple cycles of
-	 * parallel detect, both 10g and 1g. This allows for the maximum
-	 * connect attempts as defined in the AN MAS table 73-7.
-	 */
-	for (i = 0; i < 6; i++) {
-		msec_delay(100);
-
-		/* If we have link, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			goto out;
-
-		if (link_up)
-			goto out;
-	}
-
-	/* We didn't get link.  Turn SmartSpeed back off. */
-	hw->phy.smart_speed_active = false;
-	status = ixgbe_setup_mac_link_82599(hw, speed,
-					    autoneg_wait_to_complete);
-
-out:
-	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-		DEBUGOUT("Smartspeed has downgraded the link speed "
-		"from the maximum advertised\n");
-	return status;
-}
-
-/**
- *  ixgbe_setup_mac_link_82599 - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-			       ixgbe_link_speed speed,
-			       bool autoneg_wait_to_complete)
-{
-	bool autoneg = false;
-	s32 status = IXGBE_SUCCESS;
-	u32 pma_pmd_1g, link_mode;
-	/* holds the value of the AUTOC register at this point in time */
-	u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	/* holds the cached value of the AUTOC register */
-	u32 orig_autoc = 0;
-	/* temporary variable used for comparison purposes */
-	u32 autoc = current_autoc;
-	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-	u32 links_reg;
-	u32 i;
-	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-
-	DEBUGFUNC("ixgbe_setup_mac_link_82599");
-
-	/* Check to see if speed passed in is supported. */
-	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-	if (status)
-		goto out;
-
-	speed &= link_capabilities;
-
-	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
-		status = IXGBE_ERR_LINK_SETUP;
-		goto out;
-	}
-
-	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
-	if (hw->mac.orig_link_settings_stored)
-		orig_autoc = hw->mac.orig_autoc;
-	else
-		orig_autoc = autoc;
-
-	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-
-	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
-	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-		/* Set KX4/KX/KR support according to speed requested */
-		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
-				autoc |= IXGBE_AUTOC_KX4_SUPP;
-			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
-			    (hw->phy.smart_speed_active == false))
-				autoc |= IXGBE_AUTOC_KR_SUPP;
-		}
-		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-			autoc |= IXGBE_AUTOC_KX_SUPP;
-	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
-		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
-		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
-		/* Switch from 1G SFI to 10G SFI if requested */
-		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
-		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
-			autoc &= ~IXGBE_AUTOC_LMS_MASK;
-			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
-		}
-	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
-		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
-		/* Switch from 10G SFI to 1G SFI if requested */
-		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
-		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
-			autoc &= ~IXGBE_AUTOC_LMS_MASK;
-			if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
-				autoc |= IXGBE_AUTOC_LMS_1G_AN;
-			else
-				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
-		}
-	}
-
-	if (autoc != current_autoc) {
-		/* Restart link */
-		status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
-		if (status != IXGBE_SUCCESS)
-			goto out;
-
-		/* Only poll for autoneg to complete if specified to do so */
-		if (autoneg_wait_to_complete) {
-			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
-			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
-				links_reg = 0; /* Just in case Autoneg time = 0 */
-				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
-					links_reg =
-					       IXGBE_READ_REG(hw, IXGBE_LINKS);
-					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
-						break;
-					msec_delay(100);
-				}
-				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
-					status =
-						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-					DEBUGOUT("Autoneg did not complete.\n");
-				}
-			}
-		}
-
-		/* Add delay to filter out noise during initial link setup */
-		msec_delay(50);
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true if waiting is needed to complete
- *
- *  Restarts link on PHY and MAC based on settings passed in.
- **/
-STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
-					 ixgbe_link_speed speed,
-					 bool autoneg_wait_to_complete)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_setup_copper_link_82599");
-
-	/* Setup the PHY according to input speed */
-	status = hw->phy.ops.setup_link_speed(hw, speed,
-					      autoneg_wait_to_complete);
-	/* Set up MAC */
-	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
-
-	return status;
-}
-
-/**
- *  ixgbe_reset_hw_82599 - Perform hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks
- *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- *  reset.
- **/
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
-{
-	ixgbe_link_speed link_speed;
-	s32 status;
-	u32 ctrl = 0;
-	u32 i, autoc, autoc2;
-	u32 curr_lms;
-	bool link_up = false;
-
-	DEBUGFUNC("ixgbe_reset_hw_82599");
-
-	/* Call adapter stop to disable tx/rx and clear interrupts */
-	status = hw->mac.ops.stop_adapter(hw);
-	if (status != IXGBE_SUCCESS)
-		goto reset_hw_out;
-
-	/* flush pending Tx transactions */
-	ixgbe_clear_tx_pending(hw);
-
-	/* PHY ops must be identified and initialized prior to reset */
-
-	/* Identify PHY and related function pointers */
-	status = hw->phy.ops.init(hw);
-
-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto reset_hw_out;
-
-	/* Setup SFP module if there is one present. */
-	if (hw->phy.sfp_setup_needed) {
-		status = hw->mac.ops.setup_sfp(hw);
-		hw->phy.sfp_setup_needed = false;
-	}
-
-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto reset_hw_out;
-
-	/* Reset PHY */
-	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
-		hw->phy.ops.reset(hw);
-
-	/* remember AUTOC from before we reset */
-	curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
-
-mac_reset_top:
-	/*
-	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
-	 * If link reset is used when link is up, it might reset the PHY when
-	 * mng is using it.  If link is down or the flag to force full link
-	 * reset is set, then perform link reset.
-	 */
-	ctrl = IXGBE_CTRL_LNK_RST;
-	if (!hw->force_full_reset) {
-		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-		if (link_up)
-			ctrl = IXGBE_CTRL_RST;
-	}
-
-	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Poll for reset bit to self-clear meaning reset is complete */
-	for (i = 0; i < 10; i++) {
-		usec_delay(1);
-		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST_MASK))
-			break;
-	}
-
-	if (ctrl & IXGBE_CTRL_RST_MASK) {
-		status = IXGBE_ERR_RESET_FAILED;
-		DEBUGOUT("Reset polling failed to complete.\n");
-	}
-
-	msec_delay(50);
-
-	/*
-	 * Double resets are required for recovery from certain error
-	 * conditions.  Between resets, it is necessary to stall to
-	 * allow time for any pending HW events to complete.
-	 */
-	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
-		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		goto mac_reset_top;
-	}
-
-	/*
-	 * Store the original AUTOC/AUTOC2 values if they have not been
-	 * stored off yet.  Otherwise restore the stored original
-	 * values since the reset operation sets back to defaults.
-	 */
-	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-
-	/* Enable link if disabled in NVM */
-	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
-		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-
-	if (hw->mac.orig_link_settings_stored == false) {
-		hw->mac.orig_autoc = autoc;
-		hw->mac.orig_autoc2 = autoc2;
-		hw->mac.orig_link_settings_stored = true;
-	} else {
-
-		/* If MNG FW is running on a multi-speed device that
-		 * doesn't autoneg without driver support, we need to
-		 * leave LMS in the state it was in before the MAC reset.
-		 * Likewise, if we support WoL we don't want to change the
-		 * LMS state.
-		 */
-		if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
-		    hw->wol_enabled)
-			hw->mac.orig_autoc =
-				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
-				curr_lms;
-
-		if (autoc != hw->mac.orig_autoc) {
-			status = hw->mac.ops.prot_autoc_write(hw,
-							hw->mac.orig_autoc,
-							false);
-			if (status != IXGBE_SUCCESS)
-				goto reset_hw_out;
-		}
-
-		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
-		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
-			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
-			autoc2 |= (hw->mac.orig_autoc2 &
-				   IXGBE_AUTOC2_UPPER_MASK);
-			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
-		}
-	}
-
-	/* Store the permanent mac address */
-	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
-	/*
-	 * Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table.  Also reset num_rar_entries to 128,
-	 * since we modify this value when programming the SAN MAC address.
-	 */
-	hw->mac.num_rar_entries = 128;
-	hw->mac.ops.init_rx_addrs(hw);
-
-	/* Store the permanent SAN mac address */
-	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
-
-	/* Add the SAN MAC address to the RAR only if it's a valid address */
-	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
-		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
-		/* Save the SAN MAC RAR index */
-		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
-
-		/* Reserve the last RAR for the SAN MAC address */
-		hw->mac.num_rar_entries--;
-	}
-
-	/* Store the alternative WWNN/WWPN prefix */
-	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-				   &hw->mac.wwpn_prefix);
-
-reset_hw_out:
-	return status;
-}
-
-/**
- * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
- * @hw: pointer to hardware structure
- * @fdircmd: current value of FDIRCMD register
- */
-STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
-		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
-		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
-			return IXGBE_SUCCESS;
-		usec_delay(10);
-	}
-
-	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
-}
-
-/**
- *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
-{
-	s32 err;
-	int i;
-	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
-	u32 fdircmd;
-	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
-
-	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
-
-	/*
-	 * Before starting reinitialization process,
-	 * FDIRCMD.CMD must be zero.
-	 */
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err) {
-		DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
-		return err;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
-	IXGBE_WRITE_FLUSH(hw);
-	/*
-	 * The 82599 adapter's Flow Director init flow cannot be restarted.
-	 * Work around the 82599 silicon errata by performing these steps
-	 * before re-writing the FDIRCTRL control register with the same value:
-	 * - write 1 to bit 8 of the FDIRCMD register, then
-	 * - write 0 to bit 8 of the FDIRCMD register
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
-			 IXGBE_FDIRCMD_CLEARHT));
-	IXGBE_WRITE_FLUSH(hw);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-			 ~IXGBE_FDIRCMD_CLEARHT));
-	IXGBE_WRITE_FLUSH(hw);
-	/*
-	 * Clear FDIR Hash register to clear any leftover hashes
-	 * waiting to be programmed.
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
-	IXGBE_WRITE_FLUSH(hw);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Poll init-done after we write FDIRCTRL register */
-	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-				   IXGBE_FDIRCTRL_INIT_DONE)
-			break;
-		msec_delay(1);
-	}
-	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
-		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
-		return IXGBE_ERR_FDIR_REINIT_FAILED;
-	}
-
-	/* Clear FDIR statistics registers (read to clear) */
-	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
-	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
-	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
- *  @hw: pointer to hardware structure
- *  @fdirctrl: value to write to flow director control register
- **/
-STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
-	int i;
-
-	DEBUGFUNC("ixgbe_fdir_enable_82599");
-
-	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/*
-	 * Poll init-done after we write the register.  Estimated times:
-	 *      10G: PBALLOC = 11b, timing is 60us
-	 *       1G: PBALLOC = 11b, timing is 600us
-	 *     100M: PBALLOC = 11b, timing is 6ms
-	 *
-	 *     Multiply these timings by 4 if under full Rx load
-	 *
-	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
-	 * this might not finish in our poll time, but we can live with that
-	 * for now.
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-	IXGBE_WRITE_FLUSH(hw);
-	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-				   IXGBE_FDIRCTRL_INIT_DONE)
-			break;
-		msec_delay(1);
-	}
-
-	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-		DEBUGOUT("Flow Director poll time exceeded!\n");
-}
-
-/**
- *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
- *  @hw: pointer to hardware structure
- *  @fdirctrl: value to write to flow director control register, initially
- *	     contains just the value of the Rx packet buffer allocation
- **/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
-{
-	DEBUGFUNC("ixgbe_init_fdir_signature_82599");
-
-	/*
-	 * Continue setup of fdirctrl register bits:
-	 *  Move the flexible bytes to use the ethertype - shift 6 words
-	 *  Set the maximum length per hash bucket to 0xA filters
-	 *  Send interrupt when 64 filters are left
-	 */
-	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
-		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
-		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
-	/* write hashes and fdirctrl register, poll for completion */
-	ixgbe_fdir_enable_82599(hw, fdirctrl);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
- *  @hw: pointer to hardware structure
- *  @fdirctrl: value to write to flow director control register, initially
- *	     contains just the value of the Rx packet buffer allocation
- *  @cloud_mode: true - cloud mode, false - other mode
- **/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
-			bool cloud_mode)
-{
-	DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
-
-	/*
-	 * Continue setup of fdirctrl register bits:
-	 *  Turn perfect match filtering on
-	 *  Report hash in RSS field of Rx wb descriptor
-	 *  Initialize the drop queue
-	 *  Move the flexible bytes to use the ethertype - shift 6 words
-	 *  Set the maximum length per hash bucket to 0xA filters
-	 *  Send interrupt when 64 (0x4 * 16) filters are left
-	 */
-	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-		    IXGBE_FDIRCTRL_REPORT_STATUS |
-		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
-		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
-		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
-		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
-	if (cloud_mode)
-		fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
-					IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
-
-	/* write hashes and fdirctrl register, poll for completion */
-	ixgbe_fdir_enable_82599(hw, fdirctrl);
-
-	return IXGBE_SUCCESS;
-}
-
-/*
- * These defines allow us to quickly generate all of the necessary instructions
- * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
- * for values 0 through 15
- */
-#define IXGBE_ATR_COMMON_HASH_KEY \
-		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
-#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
-do { \
-	u32 n = (_n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
-		common_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
-		bucket_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
-		sig_hash ^= lo_hash_dword << (16 - n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
-		common_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
-		bucket_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
-		sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0)
-
-/**
- *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- *  @input: unique input dword
- *  @common: compressed common input dword
- *
- *  This function is almost identical to the function above but contains
- *  several optimizations such as unwinding all of the loops, letting the
- *  compiler work out all of the conditional ifs since the keys are static
- *  defines, and computing two keys at once since the hashed dword stream
- *  will be the same for both keys.
- **/
-u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
-				     union ixgbe_atr_hash_dword common)
-{
-	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
-
-	/* record the flow_vm_vlan bits as they are a key part to the hash */
-	flow_vm_vlan = IXGBE_NTOHL(input.dword);
-
-	/* generate common hash dword */
-	hi_hash_dword = IXGBE_NTOHL(common.dword);
-
-	/* low dword is word swapped version of common */
-	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
-	/* apply flow ID/VM pool/VLAN ID bits to hash words */
-	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
-	/* Process bits 0 and 16 */
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-
-	/*
-	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
-	 * delay this because bit 0 of the stream should not be processed
-	 * so we do not add the VLAN until after bit 0 was processed
-	 */
-	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
-	/* Process the remaining 30 bits of the key */
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
-	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
-
-	/* combine common_hash result with signature and bucket hashes */
-	bucket_hash ^= common_hash;
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	sig_hash ^= common_hash << 16;
-	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
-
-	/* return completed signature hash */
-	return sig_hash ^ bucket_hash;
-}
-
-/**
- *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
- *  @hw: pointer to hardware structure
- *  @input: unique input dword
- *  @common: compressed common input dword
- *  @queue: queue index to direct traffic to
- *
- * Note that the tunnel bit in input must not be set when hardware
- * tunneling support does not exist.
- **/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_hash_dword input,
-					  union ixgbe_atr_hash_dword common,
-					  u8 queue)
-{
-	u64 fdirhashcmd;
-	u8 flow_type;
-	bool tunnel;
-	u32 fdircmd;
-	s32 err;
-
-	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
-
-	/*
-	 * Get the flow_type in order to program FDIRCMD properly
-	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6,
-	 * fifth is FDIRCMD.TUNNEL_FILTER
-	 */
-	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
-	flow_type = input.formatted.flow_type &
-		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
-	switch (flow_type) {
-	case IXGBE_ATR_FLOW_TYPE_TCPV4:
-	case IXGBE_ATR_FLOW_TYPE_UDPV4:
-	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
-	case IXGBE_ATR_FLOW_TYPE_TCPV6:
-	case IXGBE_ATR_FLOW_TYPE_UDPV6:
-	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
-		break;
-	default:
-		DEBUGOUT(" Error on flow type input\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	/* configure FDIRCMD register */
-	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
-	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-	if (tunnel)
-		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
-
-	/*
-	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
-	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
-	 */
-	fdirhashcmd = (u64)fdircmd << 32;
-	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
-	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
-
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err) {
-		DEBUGOUT("Flow Director command did not complete!\n");
-		return err;
-	}
-
-	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
-
-	return IXGBE_SUCCESS;
-}
-
-#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
-do { \
-	u32 n = (_n); \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
-		bucket_hash ^= lo_hash_dword >> n; \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
-		bucket_hash ^= hi_hash_dword >> n; \
-} while (0)
-
-/**
- *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- *  @input: input bitstream to compute the hash on
- *  @input_mask: mask for the input bitstream
- *
- *  This function serves two main purposes.  First it applies the input_mask
- *  to the atr_input resulting in a cleaned up atr_input data stream.
- *  Secondly it computes the hash and stores it in the bkt_hash field at
- *  the end of the input byte stream.  This way it will be available for
- *  future use without needing to recompute the hash.
- **/
-void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-					  union ixgbe_atr_input *input_mask)
-{
-
-	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-	u32 bucket_hash = 0;
-	u32 hi_dword = 0;
-	u32 i = 0;
-
-	/* Apply masks to input data */
-	for (i = 0; i < 14; i++)
-		input->dword_stream[i]  &= input_mask->dword_stream[i];
-
-	/* record the flow_vm_vlan bits as they are a key part to the hash */
-	flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
-
-	/* generate common hash dword */
-	for (i = 1; i <= 13; i++)
-		hi_dword ^= input->dword_stream[i];
-	hi_hash_dword = IXGBE_NTOHL(hi_dword);
-
-	/* low dword is word swapped version of common */
-	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
-	/* apply flow ID/VM pool/VLAN ID bits to hash words */
-	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
-	/* Process bits 0 and 16 */
-	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
-
-	/*
-	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
-	 * delay this because bit 0 of the stream should not be processed
-	 * so we do not add the VLAN until after bit 0 was processed
-	 */
-	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
-	/* Process remaining 30 bits of the key */
-	for (i = 1; i <= 15; i++)
-		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
-
-	/*
-	 * Limit hash to 13 bits since max bucket count is 8K.
-	 * Store result at the end of the input stream.
-	 */
-	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
-}
-
-/**
- *  ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
- *  @input_mask: mask to be bit swapped
- *
- *  The source and destination port masks for flow director are bit swapped
- *  in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order to
- *  generate a correctly swapped value we need to bit swap the mask and that
- *  is what is accomplished by this function.
- **/
-STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
-{
-	u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
-	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
-	mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
-	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
-	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
-	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
-	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
-}
-
-/*
- * These two macros are meant to address the fact that we have registers
- * that are either all or in part big-endian.  As a result on big-endian
- * systems we will end up byte swapping the value to little-endian before
- * it is byte swapped again and written to the hardware in the original
- * big-endian format.
- */
-#define IXGBE_STORE_AS_BE32(_value) \
-	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
-	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
-
-#define IXGBE_WRITE_REG_BE32(a, reg, value) \
-	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
-
-#define IXGBE_STORE_AS_BE16(_value) \
-	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
-
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-				    union ixgbe_atr_input *input_mask, bool cloud_mode)
-{
-	/* mask IPv6 since it is currently not supported */
-	u32 fdirm = IXGBE_FDIRM_DIPv6;
-	u32 fdirtcpm;
-	u32 fdirip6m;
-	DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
-
-	/*
-	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
-	 * are zero, then assume a full mask for that field.  Also assume that
-	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
-	 * cannot be masked out in this implementation.
-	 *
-	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
-	 * point in time.
-	 */
-
-	/* verify bucket hash is cleared on hash generation */
-	if (input_mask->formatted.bkt_hash)
-		DEBUGOUT(" bucket hash should always be 0 in mask\n");
-
-	/* Program FDIRM and verify partial masks */
-	switch (input_mask->formatted.vm_pool & 0x7F) {
-	case 0x0:
-		fdirm |= IXGBE_FDIRM_POOL;
-	case 0x7F:
-		break;
-	default:
-		DEBUGOUT(" Error on vm pool mask\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
-	case 0x0:
-		fdirm |= IXGBE_FDIRM_L4P;
-		if (input_mask->formatted.dst_port ||
-		    input_mask->formatted.src_port) {
-			DEBUGOUT(" Error on src/dst port mask\n");
-			return IXGBE_ERR_CONFIG;
-		}
-	case IXGBE_ATR_L4TYPE_MASK:
-		break;
-	default:
-		DEBUGOUT(" Error on flow type mask\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
-	case 0x0000:
-		/* mask VLAN ID, fall through to mask VLAN priority */
-		fdirm |= IXGBE_FDIRM_VLANID;
-	case 0x0FFF:
-		/* mask VLAN priority */
-		fdirm |= IXGBE_FDIRM_VLANP;
-		break;
-	case 0xE000:
-		/* mask VLAN ID only, fall through */
-		fdirm |= IXGBE_FDIRM_VLANID;
-	case 0xEFFF:
-		/* no VLAN fields masked */
-		break;
-	default:
-		DEBUGOUT(" Error on VLAN mask\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
-	case 0x0000:
-		/* Mask Flex Bytes, fall through */
-		fdirm |= IXGBE_FDIRM_FLEX;
-	case 0xFFFF:
-		break;
-	default:
-		DEBUGOUT(" Error on flexible byte mask\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	if (cloud_mode) {
-		fdirm |= IXGBE_FDIRM_L3P;
-		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
-		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
-
-		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
-		case 0x00:
-			/* Mask inner MAC, fall through */
-			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
-		case 0xFF:
-			break;
-		default:
-			DEBUGOUT(" Error on inner_mac byte mask\n");
-			return IXGBE_ERR_CONFIG;
-		}
-
-		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
-		case 0x0:
-			/* Mask vxlan id */
-			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
-			break;
-		case 0x00FFFFFF:
-			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
-			break;
-		case 0xFFFFFFFF:
-			break;
-		default:
-			DEBUGOUT(" Error on TNI/VNI byte mask\n");
-			return IXGBE_ERR_CONFIG;
-		}
-
-		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
-		case 0x0:
-			/* Mask tunnel type, fall through */
-			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
-		case 0xFFFF:
-			break;
-		default:
-			DEBUGOUT(" Error on tunnel type byte mask\n");
-			return IXGBE_ERR_CONFIG;
-		}
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
-
-		/* Set all bits in FDIRSIP4M and FDIRDIP4M in cloud mode */
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
-	}
-
-	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
-
-	if (!cloud_mode) {
-		/* store the TCP/UDP port masks, bit reversed from port
-		 * layout */
-		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
-
-		/* write both the same so that UDP and TCP use the same mask */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
-		/* also use it for SCTP */
-		switch (hw->mac.type) {
-		case ixgbe_mac_X550:
-		case ixgbe_mac_X550EM_x:
-			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
-			break;
-		default:
-			break;
-		}
-
-		/* store source and destination IP masks (big-endian) */
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
-				     ~input_mask->formatted.src_ip[0]);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
-				     ~input_mask->formatted.dst_ip[0]);
-	}
-	return IXGBE_SUCCESS;
-}
-
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_input *input,
-					  u16 soft_id, u8 queue, bool cloud_mode)
-{
-	u32 fdirport, fdirvlan, fdirhash, fdircmd;
-	u32 addr_low, addr_high;
-	u32 cloud_type = 0;
-	s32 err;
-
-	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
-	if (!cloud_mode) {
-		/* currently IPv6 is not supported, must be programmed with 0 */
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
-				     input->formatted.src_ip[0]);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
-				     input->formatted.src_ip[1]);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
-				     input->formatted.src_ip[2]);
-
-		/* record the source address (big-endian) */
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
-			input->formatted.src_ip[0]);
-
-		/* record the first 32 bits of the destination address
-		 * (big-endian) */
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
-			input->formatted.dst_ip[0]);
-
-		/* record source and destination port (little-endian)*/
-		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
-		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-	}
-
-	/* record VLAN (little-endian) and flex_bytes (big-endian) */
-	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
-	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
-	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
-
-	if (cloud_mode) {
-		if (input->formatted.tunnel_type != 0)
-			cloud_type = 0x80000000;
-
-		addr_low = ((u32)input->formatted.inner_mac[0] |
-				((u32)input->formatted.inner_mac[1] << 8) |
-				((u32)input->formatted.inner_mac[2] << 16) |
-				((u32)input->formatted.inner_mac[3] << 24));
-		addr_high = ((u32)input->formatted.inner_mac[4] |
-				((u32)input->formatted.inner_mac[5] << 8));
-		cloud_type |= addr_high;
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type);
-		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni);
-	}
-
-	/* configure FDIRHASH register */
-	fdirhash = input->formatted.bkt_hash;
-	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
-	/*
-	 * flush all previous writes to make certain registers are
-	 * programmed prior to issuing the command
-	 */
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* configure FDIRCMD register */
-	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	if (queue == IXGBE_FDIR_DROP_QUEUE)
-		fdircmd |= IXGBE_FDIRCMD_DROP;
-	if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
-		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
-	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
-	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err) {
-		DEBUGOUT("Flow Director command did not complete!\n");
-		return err;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_input *input,
-					  u16 soft_id)
-{
-	u32 fdirhash;
-	u32 fdircmd;
-	s32 err;
-
-	/* configure FDIRHASH register */
-	fdirhash = input->formatted.bkt_hash;
-	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
-	/* flush hash to HW */
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Query if filter is present */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
-
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err) {
-		DEBUGOUT("Flow Director command did not complete!\n");
-		return err;
-	}
-
-	/* if filter exists in hardware then remove it */
-	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-		IXGBE_WRITE_FLUSH(hw);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- *  @hw: pointer to hardware structure
- *  @input: input bitstream
- *  @input_mask: mask for the input bitstream
- *  @soft_id: software index for the filters
- *  @queue: queue index to direct traffic to
- *
- *  Note that the caller to this function must lock before calling, since the
- *  hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-					union ixgbe_atr_input *input,
-					union ixgbe_atr_input *input_mask,
-					u16 soft_id, u8 queue, bool cloud_mode)
-{
-	s32 err = IXGBE_ERR_CONFIG;
-
-	DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
-
-	/*
-	 * Check flow_type formatting, and bail out before we touch the hardware
-	 * if there's a configuration issue
-	 */
-	switch (input->formatted.flow_type) {
-	case IXGBE_ATR_FLOW_TYPE_IPV4:
-	case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
-		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
-		if (input->formatted.dst_port || input->formatted.src_port) {
-			DEBUGOUT(" Error on src/dst port\n");
-			return IXGBE_ERR_CONFIG;
-		}
-		break;
-	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
-	case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
-		if (input->formatted.dst_port || input->formatted.src_port) {
-			DEBUGOUT(" Error on src/dst port\n");
-			return IXGBE_ERR_CONFIG;
-		}
-	case IXGBE_ATR_FLOW_TYPE_TCPV4:
-	case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
-	case IXGBE_ATR_FLOW_TYPE_UDPV4:
-	case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
-		input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
-						  IXGBE_ATR_L4TYPE_MASK;
-		break;
-	default:
-		DEBUGOUT(" Error on flow type input\n");
-		return err;
-	}
-
-	/* program input mask into the HW */
-	err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
-	if (err)
-		return err;
-
-	/* apply mask and compute/store hash */
-	ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
-
-	/* program filters to filter memory */
-	return ixgbe_fdir_write_perfect_filter_82599(hw, input,
-						     soft_id, queue, cloud_mode);
-}
-
-/**
- *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to read
- *  @val: read value
- *
- *  Performs read operation to Omer analog register specified.
- **/
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
-	u32  core_ctl;
-
-	DEBUGFUNC("ixgbe_read_analog_reg8_82599");
-
-	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
-			(reg << 8));
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(10);
-	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
-	*val = (u8)core_ctl;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to write
- *  @val: value to write
- *
- *  Performs write operation to Omer analog register specified.
- **/
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
-	u32  core_ctl;
-
-	DEBUGFUNC("ixgbe_write_analog_reg8_82599");
-
-	core_ctl = (reg << 8) | val;
-	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(10);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware using the generic start_hw function
- *  and the generation-2 (gen2) start_hw function.
- *  Then performs revision-specific operations, if any.
- **/
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_start_hw_82599");
-
-	ret_val = ixgbe_start_hw_generic(hw);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	ret_val = ixgbe_start_hw_gen2(hw);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	/* We need to run link autotry after the driver loads */
-	hw->mac.autotry_restart = true;
-
-	if (ret_val == IXGBE_SUCCESS)
-		ret_val = ixgbe_verify_fw_version_82599(hw);
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_identify_phy_82599 - Get physical layer module
- *  @hw: pointer to hardware structure
- *
- *  Determines the physical layer module found on the current adapter.
- *  If PHY already detected, maintains current PHY type in hw struct,
- *  otherwise executes the PHY detection routine.
- **/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_identify_phy_82599");
-
-	/* Detect PHY if not unknown - returns success if already detected. */
-	status = ixgbe_identify_phy_generic(hw);
-	if (status != IXGBE_SUCCESS) {
-		/* 82599 10GBASE-T requires an external PHY */
-		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
-			return status;
-		else
-			status = ixgbe_identify_module_generic(hw);
-	}
-
-	/* Set PHY type none if no PHY detected */
-	if (hw->phy.type == ixgbe_phy_unknown) {
-		hw->phy.type = ixgbe_phy_none;
-		return IXGBE_SUCCESS;
-	}
-
-	/* Return error if SFP module has been detected but is not supported */
-	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
-	return status;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-	u16 ext_ability = 0;
-
-	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
-
-	hw->phy.ops.identify(hw);
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_tn:
-	case ixgbe_phy_cu_unknown:
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
-		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-		goto out;
-	default:
-		break;
-	}
-
-	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-	case IXGBE_AUTOC_LMS_1G_AN:
-	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
-			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-			goto out;
-		} else
-			/* SFI mode so read SFP module */
-			goto sfp_check;
-		break;
-	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
-		goto out;
-		break;
-	case IXGBE_AUTOC_LMS_10G_SERIAL:
-		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-			goto out;
-		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
-			goto sfp_check;
-		break;
-	case IXGBE_AUTOC_LMS_KX4_KX_KR:
-	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-		if (autoc & IXGBE_AUTOC_KX_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		if (autoc & IXGBE_AUTOC_KX4_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-		if (autoc & IXGBE_AUTOC_KR_SUPP)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-		goto out;
-		break;
-	default:
-		goto out;
-		break;
-	}
-
-sfp_check:
-	/* SFP check must be done last since DA modules are sometimes used to
-	 * test KR mode - we need to identify KR mode correctly before the SFP module.
-	 * Call identify_sfp because the pluggable module may have changed */
-	physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
-out:
-	return physical_layer;
-}
-
-/**
- *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
- *  @hw: pointer to hardware structure
- *  @regval: register value to write to RXCTRL
- *
- *  Enables the Rx DMA unit for 82599
- **/
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
-{
-
-	DEBUGFUNC("ixgbe_enable_rx_dma_82599");
-
-	/*
-	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
-	 * If traffic is incoming before we enable the Rx unit, it could hang
-	 * the Rx DMA unit.  Therefore, make sure the security engine is
-	 * completely disabled prior to enabling the Rx unit.
-	 */
-
-	hw->mac.ops.disable_sec_rx_path(hw);
-
-	if (regval & IXGBE_RXCTRL_RXEN)
-		ixgbe_enable_rx(hw);
-	else
-		ixgbe_disable_rx(hw);
-
-	hw->mac.ops.enable_sec_rx_path(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
- *  @hw: pointer to hardware structure
- *
- *  Verifies that the installed firmware version is 0.6 or higher
- *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
- *
- *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
- *  if the FW version is not supported.
- **/
-STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_EEPROM_VERSION;
-	u16 fw_offset, fw_ptp_cfg_offset;
-	u16 fw_version;
-
-	DEBUGFUNC("ixgbe_verify_fw_version_82599");
-
-	/* firmware check is only necessary for SFI devices */
-	if (hw->phy.media_type != ixgbe_media_type_fiber) {
-		status = IXGBE_SUCCESS;
-		goto fw_version_out;
-	}
-
-	/* get the offset to the Firmware Module block */
-	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
-		return IXGBE_ERR_EEPROM_VERSION;
-	}
-
-	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
-		goto fw_version_out;
-
-	/* get the offset to the Pass Through Patch Configuration block */
-	if (hw->eeprom.ops.read(hw, (fw_offset +
-				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-				 &fw_ptp_cfg_offset)) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom read at offset %d failed",
-			      fw_offset +
-			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
-		return IXGBE_ERR_EEPROM_VERSION;
-	}
-
-	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
-		goto fw_version_out;
-
-	/* get the firmware version */
-	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom read at offset %d failed",
-			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
-		return IXGBE_ERR_EEPROM_VERSION;
-	}
-
-	if (fw_version > 0x5)
-		status = IXGBE_SUCCESS;
-
-fw_version_out:
-	return status;
-}
-
-/**
- *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
- *  @hw: pointer to hardware structure
- *
- *  Returns true if the LESM FW module is present and enabled. Otherwise
- *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
- **/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
-{
-	bool lesm_enabled = false;
-	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
-	s32 status;
-
-	DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
-
-	/* get the offset to the Firmware Module block */
-	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
-
-	if ((status != IXGBE_SUCCESS) ||
-	    (fw_offset == 0) || (fw_offset == 0xFFFF))
-		goto out;
-
-	/* get the offset to the LESM Parameters block */
-	status = hw->eeprom.ops.read(hw, (fw_offset +
-				     IXGBE_FW_LESM_PARAMETERS_PTR),
-				     &fw_lesm_param_offset);
-
-	if ((status != IXGBE_SUCCESS) ||
-	    (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
-		goto out;
-
-	/* get the LESM state word */
-	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
-				     IXGBE_FW_LESM_STATE_1),
-				     &fw_lesm_state);
-
-	if ((status == IXGBE_SUCCESS) &&
-	    (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
-		lesm_enabled = true;
-
-out:
-	return lesm_enabled;
-}
-
-/**
- *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
- *  fastest available method
- *
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in EEPROM to read
- *  @words: number of words
- *  @data: word(s) read from the EEPROM
- *
- *  Retrieves 16 bit word(s) read from EEPROM
- **/
-STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
-					  u16 words, u16 *data)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val = IXGBE_ERR_CONFIG;
-
-	DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
-
-	/*
-	 * If EEPROM is detected and can be addressed using 14 bits,
-	 * use EERD, otherwise use bit-bang
-	 */
-	if ((eeprom->type == ixgbe_eeprom_spi) &&
-	    (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
-		ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
-							 data);
-	else
-		ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
-								    words,
-								    data);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_read_eeprom_82599 - Read EEPROM word using
- *  fastest available method
- *
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM
- **/
-STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
-				   u16 offset, u16 *data)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val = IXGBE_ERR_CONFIG;
-
-	DEBUGFUNC("ixgbe_read_eeprom_82599");
-
-	/*
-	 * If EEPROM is detected and can be addressed using 14 bits,
-	 * use EERD, otherwise use bit-bang
-	 */
-	if ((eeprom->type == ixgbe_eeprom_spi) &&
-	    (offset <= IXGBE_EERD_MAX_ADDR))
-		ret_val = ixgbe_read_eerd_generic(hw, offset, data);
-	else
-		ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
-
-	return ret_val;
-}
-
-/**
- * ixgbe_reset_pipeline_82599 - perform pipeline reset
- *
- *  @hw: pointer to hardware structure
- *
- * Reset pipeline by asserting Restart_AN together with LMS change to ensure
- * full pipeline reset.  This function assumes the SW/FW lock is held.
- **/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
-{
-	s32 ret_val;
-	u32 anlp1_reg = 0;
-	u32 i, autoc_reg, autoc2_reg;
-
-	/* Enable link if disabled in NVM */
-	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
-		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
-			autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
-	/* Wait for AN to leave state 0 */
-	for (i = 0; i < 10; i++) {
-		msec_delay(4);
-		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
-			break;
-	}
-
-	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
-		DEBUGOUT("auto negotiation not completed\n");
-		ret_val = IXGBE_ERR_RESET_FAILED;
-		goto reset_pipeline_out;
-	}
-
-	ret_val = IXGBE_SUCCESS;
-
-reset_pipeline_out:
-	/* Write AUTOC register with original LMS field and Restart_AN */
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return ret_val;
-}
-
-
-/**
- *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to read
- *  @data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface at
- *  a specified device address.
- **/
-STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 dev_addr, u8 *data)
-{
-	u32 esdp;
-	s32 status;
-	s32 timeout = 200;
-
-	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
-
-	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
-		/* Acquire I2C bus ownership. */
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		esdp |= IXGBE_ESDP_SDP0;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-
-		while (timeout) {
-			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-			if (esdp & IXGBE_ESDP_SDP1)
-				break;
-
-			msec_delay(5);
-			timeout--;
-		}
-
-		if (!timeout) {
-			DEBUGOUT("Driver can't access resource,"
-				 " acquiring I2C bus timeout.\n");
-			status = IXGBE_ERR_I2C;
-			goto release_i2c_access;
-		}
-	}
-
-	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
-
-release_i2c_access:
-
-	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
-		/* Release I2C bus ownership. */
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		esdp &= ~IXGBE_ESDP_SDP0;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to write
- *  @data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface at
- *  a specified device address.
- **/
-STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
-				 u8 dev_addr, u8 data)
-{
-	u32 esdp;
-	s32 status;
-	s32 timeout = 200;
-
-	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
-
-	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
-		/* Acquire I2C bus ownership. */
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		esdp |= IXGBE_ESDP_SDP0;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-
-		while (timeout) {
-			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-			if (esdp & IXGBE_ESDP_SDP1)
-				break;
-
-			msec_delay(5);
-			timeout--;
-		}
-
-		if (!timeout) {
-			DEBUGOUT("Driver can't access resource,"
-				 " acquiring I2C bus timeout.\n");
-			status = IXGBE_ERR_I2C;
-			goto release_i2c_access;
-		}
-	}
-
-	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
-
-release_i2c_access:
-
-	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
-		/* Release I2C bus ownership. */
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		esdp &= ~IXGBE_ESDP_SDP0;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-
-	return status;
-}
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
deleted file mode 100644
index 39316a9..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.h
+++ /dev/null
@@ -1,65 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_82599_H_
-#define _IXGBE_82599_H_
-
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-				      ixgbe_link_speed *speed, bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-					  ixgbe_link_speed speed,
-					  bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-				    ixgbe_link_speed speed,
-				    bool autoneg_wait_to_complete);
-s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
-			       bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			       bool autoneg_wait_to_complete);
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
-s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-#endif /* _IXGBE_82599_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
deleted file mode 100644
index c704b69..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
+++ /dev/null
@@ -1,1477 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-
-/**
- * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
- * @hw: pointer to hardware structure
- * @map: pointer to u8 arr for returning map
- *
- * Read the rtrup2tc HW register and resolve its content into map
- **/
-void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
-{
-	if (hw->mac.ops.get_rtrup2tc)
-		hw->mac.ops.get_rtrup2tc(hw, map);
-}
-
-/**
- *  ixgbe_init_shared_code - Initialize the shared code
- *  @hw: pointer to hardware structure
- *
- *  This will assign function pointers and assign the MAC type and PHY code.
- *  Does not touch the hardware. This function must be called prior to any
- *  other function in the shared code. The ixgbe_hw structure should be
- *  memset to 0 prior to calling this function.  The following fields in
- *  hw structure should be filled in prior to calling this function:
- *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
- *  subsystem_vendor_id, and revision_id
- **/
-s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_init_shared_code");
-
-	/*
-	 * Set the mac type
-	 */
-	ixgbe_set_mac_type(hw);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		status = ixgbe_init_ops_82598(hw);
-		break;
-	case ixgbe_mac_82599EB:
-		status = ixgbe_init_ops_82599(hw);
-		break;
-	case ixgbe_mac_X540:
-		status = ixgbe_init_ops_X540(hw);
-		break;
-	case ixgbe_mac_X550:
-		status = ixgbe_init_ops_X550(hw);
-		break;
-	case ixgbe_mac_X550EM_x:
-		status = ixgbe_init_ops_X550EM(hw);
-		break;
-	case ixgbe_mac_82599_vf:
-	case ixgbe_mac_X540_vf:
-	case ixgbe_mac_X550_vf:
-	case ixgbe_mac_X550EM_x_vf:
-		status = ixgbe_init_ops_vf(hw);
-		break;
-	default:
-		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-		break;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_set_mac_type - Sets MAC type
- *  @hw: pointer to the HW structure
- *
- *  This function sets the mac type of the adapter based on the
- *  vendor ID and device ID stored in the hw structure.
- **/
-s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_set_mac_type\n");
-
-	if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
-		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
-			     "Unsupported vendor id: %x", hw->vendor_id);
-		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-	}
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82598:
-	case IXGBE_DEV_ID_82598_BX:
-	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-	case IXGBE_DEV_ID_82598AT:
-	case IXGBE_DEV_ID_82598AT2:
-	case IXGBE_DEV_ID_82598EB_CX4:
-	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-	case IXGBE_DEV_ID_82598EB_XF_LR:
-	case IXGBE_DEV_ID_82598EB_SFP_LOM:
-		hw->mac.type = ixgbe_mac_82598EB;
-		break;
-	case IXGBE_DEV_ID_82599_KX4:
-	case IXGBE_DEV_ID_82599_KX4_MEZZ:
-	case IXGBE_DEV_ID_82599_XAUI_LOM:
-	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-	case IXGBE_DEV_ID_82599_KR:
-	case IXGBE_DEV_ID_82599_SFP:
-	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
-	case IXGBE_DEV_ID_82599_SFP_FCOE:
-	case IXGBE_DEV_ID_82599_SFP_EM:
-	case IXGBE_DEV_ID_82599_SFP_SF2:
-	case IXGBE_DEV_ID_82599_SFP_SF_QP:
-	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
-	case IXGBE_DEV_ID_82599EN_SFP:
-	case IXGBE_DEV_ID_82599_CX4:
-	case IXGBE_DEV_ID_82599_LS:
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		hw->mac.type = ixgbe_mac_82599EB;
-		break;
-	case IXGBE_DEV_ID_82599_VF:
-	case IXGBE_DEV_ID_82599_VF_HV:
-		hw->mac.type = ixgbe_mac_82599_vf;
-		break;
-	case IXGBE_DEV_ID_X540_VF:
-	case IXGBE_DEV_ID_X540_VF_HV:
-		hw->mac.type = ixgbe_mac_X540_vf;
-		break;
-	case IXGBE_DEV_ID_X540T:
-	case IXGBE_DEV_ID_X540T1:
-		hw->mac.type = ixgbe_mac_X540;
-		break;
-	case IXGBE_DEV_ID_X550T:
-		hw->mac.type = ixgbe_mac_X550;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_KX4:
-	case IXGBE_DEV_ID_X550EM_X_KR:
-	case IXGBE_DEV_ID_X550EM_X_10G_T:
-	case IXGBE_DEV_ID_X550EM_X_1G_T:
-	case IXGBE_DEV_ID_X550EM_X_SFP:
-		hw->mac.type = ixgbe_mac_X550EM_x;
-		break;
-	case IXGBE_DEV_ID_X550_VF:
-	case IXGBE_DEV_ID_X550_VF_HV:
-		hw->mac.type = ixgbe_mac_X550_vf;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_VF:
-	case IXGBE_DEV_ID_X550EM_X_VF_HV:
-		hw->mac.type = ixgbe_mac_X550EM_x_vf;
-		break;
-	default:
-		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
-			     "Unsupported device id: %x",
-			     hw->device_id);
-		break;
-	}
-
-	DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
-		  hw->mac.type, ret_val);
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_hw - Initialize the hardware
- *  @hw: pointer to hardware structure
- *
- *  Initialize the hardware by resetting and then starting the hardware
- **/
-s32 ixgbe_init_hw(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_reset_hw - Performs a hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks and
- *  clears all interrupts, performs a PHY reset, and performs a MAC reset
- **/
-s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_start_hw - Prepares hardware for Rx/Tx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware by filling the bus info structure and media type,
- *  clears all on chip counters, initializes receive address registers,
- *  multicast table, VLAN filter table, calls routine to setup link and
- *  flow control settings, and leaves transmit and receive units disabled
- *  and uninitialized.
- **/
-s32 ixgbe_start_hw(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering,
- *  which is disabled by default in ixgbe_start_hw();
- *
- *  @hw: pointer to hardware structure
- *
- *  Enables relaxed ordering.
- **/
-void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.enable_relaxed_ordering)
-		hw->mac.ops.enable_relaxed_ordering(hw);
-}
-
-/**
- *  ixgbe_clear_hw_cntrs - Clear hardware counters
- *  @hw: pointer to hardware structure
- *
- *  Clears all hardware statistics counters by reading them from the hardware.
- *  Statistics counters are clear on read.
- **/
-s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_media_type - Get media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
-			       ixgbe_media_type_unknown);
-}
-
-/**
- *  ixgbe_get_mac_addr - Get MAC address
- *  @hw: pointer to hardware structure
- *  @mac_addr: Adapter MAC address
- *
- *  Reads the adapter's MAC address from the first Receive Address Register
- *  (RAR0). A reset of the adapter must have been performed prior to calling
- *  this function in order for the MAC address to have been loaded from the
- *  EEPROM into RAR0
- **/
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
-			       (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_san_mac_addr - Get SAN MAC address
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
- *  per-port, so set_lan_id() must be called before reading the addresses.
- **/
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
-			       (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_san_mac_addr - Write a SAN MAC address
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Writes a SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
-			       (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_device_caps - Get additional device capabilities
- *  @hw: pointer to hardware structure
- *  @device_caps: the EEPROM word for device capabilities
- *
- *  Reads the extra device capabilities from the EEPROM
- **/
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
-			       (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
- *  @hw: pointer to hardware structure
- *  @wwnn_prefix: the alternative WWNN prefix
- *  @wwpn_prefix: the alternative WWPN prefix
- *
- *  This function will read the EEPROM from the alternative SAN MAC address
- *  block to check support for the alternative WWNN/WWPN prefix.
- **/
-s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-			 u16 *wwpn_prefix)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
-			       (hw, wwnn_prefix, wwpn_prefix),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_fcoe_boot_status -  Get FCOE boot status from EEPROM
- *  @hw: pointer to hardware structure
- *  @bs: the fcoe boot status
- *
- *  This function will read the FCOE boot status from the iSCSI FCOE block
- **/
-s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
-			       (hw, bs),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_bus_info - Set PCI bus info
- *  @hw: pointer to hardware structure
- *
- *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
- **/
-s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_num_of_tx_queues - Get Tx queues
- *  @hw: pointer to hardware structure
- *
- *  Returns the number of transmit queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
-{
-	return hw->mac.max_tx_queues;
-}
-
-/**
- *  ixgbe_get_num_of_rx_queues - Get Rx queues
- *  @hw: pointer to hardware structure
- *
- *  Returns the number of receive queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
-{
-	return hw->mac.max_rx_queues;
-}
-
-/**
- *  ixgbe_stop_adapter - Disable Rx/Tx units
- *  @hw: pointer to hardware structure
- *
- *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- *  disables transmit and receive units. The adapter_stopped flag is used by
- *  the shared code and drivers to determine if the adapter is in a stopped
- *  state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_read_pba_string - Reads part number string from EEPROM
- *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number string from the EEPROM
- *  @pba_num_size: part number string buffer length
- *
- *  Reads the part number string from the EEPROM.
- **/
-s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
-{
-	return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
-}
-
-/**
- *  ixgbe_read_pba_num - Reads part number from EEPROM
- *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number from the EEPROM
- *
- *  Reads the part number from the EEPROM.
- **/
-s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
-{
-	return ixgbe_read_pba_num_generic(hw, pba_num);
-}
-
-/**
- *  ixgbe_identify_phy - Get PHY type
- *  @hw: pointer to hardware structure
- *
- *  Determines the physical layer module found on the current adapter.
- **/
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	if (hw->phy.type == ixgbe_phy_unknown) {
-		status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
-					 IXGBE_NOT_IMPLEMENTED);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_reset_phy - Perform a PHY reset
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	if (hw->phy.type == ixgbe_phy_unknown) {
-		if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
-			status = IXGBE_ERR_PHY;
-	}
-
-	if (status == IXGBE_SUCCESS) {
-		status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
-					 IXGBE_NOT_IMPLEMENTED);
-	}
-	return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version - Get the PHY firmware version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to firmware version
- **/
-s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
-				 (hw, firmware_version),
-				 IXGBE_NOT_IMPLEMENTED);
-	return status;
-}
-
-/**
- *  ixgbe_read_phy_reg - Read PHY register
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit address of PHY register to read
- *  @phy_data: Pointer to read data from PHY register
- *
- *  Reads a value from a specified PHY register
- **/
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-		       u16 *phy_data)
-{
-	if (hw->phy.id == 0)
-		ixgbe_identify_phy(hw);
-
-	return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
-			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_phy_reg - Write PHY register
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @phy_data: Data to write to the PHY register
- *
- *  Writes a value to specified PHY register
- **/
-s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-			u16 phy_data)
-{
-	if (hw->phy.id == 0)
-		ixgbe_identify_phy(hw);
-
-	return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
-			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_setup_phy_link - Restart PHY autoneg
- *  @hw: pointer to hardware structure
- *
- *  Restarts PHY autonegotiation and waits for completion.
- **/
-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_setup_internal_phy - Configure integrated PHY
- * @hw: pointer to hardware structure
- *
- * Reconfigure the integrated PHY to enable communication with the external PHY.
- * Returns success if not implemented, since nothing needs to be done in this
- * case.
- */
-s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
-			       IXGBE_SUCCESS);
-}
-
-/**
- *  ixgbe_check_phy_link - Determine link and speed status
- *  @hw: pointer to hardware structure
- *
- *  Reads a PHY register to determine if link is up and the current speed for
- *  the PHY.
- **/
-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-			 bool *link_up)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
-			       link_up), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_setup_phy_link_speed - Set auto advertise
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *
- *  Sets the auto advertised capabilities
- **/
-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			       bool autoneg_wait_to_complete)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
-			       autoneg_wait_to_complete),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_phy_power - Control the phy power state
- * @hw: pointer to hardware structure
- * @on: true for on, false for off
- */
-s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_check_link - Get link and speed status
- *  @hw: pointer to hardware structure
- *
- *  Reads the LINKS register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-		     bool *link_up, bool link_up_wait_to_complete)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
-			       link_up, link_up_wait_to_complete),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_disable_tx_laser - Disable Tx laser
- *  @hw: pointer to hardware structure
- *
- *  If the driver needs to disable the laser on SFI optics.
- **/
-void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.disable_tx_laser)
-		hw->mac.ops.disable_tx_laser(hw);
-}
-
-/**
- *  ixgbe_enable_tx_laser - Enable Tx laser
- *  @hw: pointer to hardware structure
- *
- *  If the driver needs to enable the laser on SFI optics.
- **/
-void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.enable_tx_laser)
-		hw->mac.ops.enable_tx_laser(hw);
-}
-
-/**
- *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
- *  @hw: pointer to hardware structure
- *
- *  When the driver changes the link speeds that it can support, it should
- *  flap the Tx laser to alert the link partner to start the autotry
- *  process on its end.
- **/
-void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.flap_tx_laser)
-		hw->mac.ops.flap_tx_laser(hw);
-}
-
-/**
- *  ixgbe_setup_link - Set link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *
- *  Configures link settings.  Restarts the link.
- *  Performs autonegotiation if needed.
- **/
-s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-		     bool autoneg_wait_to_complete)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
-			       autoneg_wait_to_complete),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_link_capabilities - Returns link capabilities
- *  @hw: pointer to hardware structure
- *
- *  Determines the link capabilities of the current configuration.
- **/
-s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-				bool *autoneg)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
-			       speed, autoneg), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_led_on - Turn on LEDs
- *  @hw: pointer to hardware structure
- *  @index: led number to turn on
- *
- *  Turns on the software controllable LEDs.
- **/
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_led_off - Turn off LEDs
- *  @hw: pointer to hardware structure
- *  @index: led number to turn off
- *
- *  Turns off the software controllable LEDs.
- **/
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_blink_led_start - Blink LEDs
- *  @hw: pointer to hardware structure
- *  @index: led number to blink
- *
- *  Blink LED based on index.
- **/
-s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_blink_led_stop - Stop blinking LEDs
- *  @hw: pointer to hardware structure
- *
- *  Stop blinking LED based on index.
- **/
-s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_init_eeprom_params - Initialize EEPROM parameters
- *  @hw: pointer to hardware structure
- *
- *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
- *  ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-
-/**
- *  ixgbe_write_eeprom - Write word to EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be written to
- *  @data: 16 bit word to be written to the EEPROM
- *
- *  Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
- *  called after this function, the EEPROM will most likely contain an
- *  invalid checksum.
- **/
-s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be written to
- *  @data: 16 bit word(s) to be written to the EEPROM
- *  @words: number of words
- *
- *  Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
- *  called after this function, the EEPROM will most likely contain an
- *  invalid checksum.
- **/
-s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
-			      u16 *data)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
-			       (hw, offset, words, data),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_read_eeprom - Read word from EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be read
- *  @data: read 16 bit value from EEPROM
- *
- *  Reads 16 bit value from EEPROM
- **/
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be read
- *  @data: read 16 bit word(s) from EEPROM
- *  @words: number of words
- *
- *  Reads 16 bit word(s) from EEPROM
- **/
-s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
-			     u16 words, u16 *data)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
-			       (hw, offset, words, data),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
- *  @hw: pointer to hardware structure
- *  @checksum_val: calculated checksum
- *
- *  Performs checksum calculation and validates the EEPROM checksum
- **/
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
-			       (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_insert_mac_addr - Find a RAR for this mac address
- *  @hw: pointer to hardware structure
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq pool to assign
- *
- *  Puts an ethernet address into a receive address register, or
- *  finds the RAR that it is already in; adds to the pool list
- **/
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
-			       (hw, addr, vmdq),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_rar - Set Rx address register
- *  @hw: pointer to hardware structure
- *  @index: Receive address register to write
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq "set"
- *  @enable_addr: set flag that address is active
- *
- *  Puts an ethernet address into a receive address register.
- **/
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-		  u32 enable_addr)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
-			       enable_addr), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_clear_rar - Clear Rx address register
- *  @hw: pointer to hardware structure
- *  @index: Receive address register to clear
- *
- *  Clears the ethernet address from a receive address register.
- **/
-s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
- *  @hw: pointer to hardware structure
- *  @rar: receive address register index to associate with VMDq index
- *  @vmdq: VMDq set or pool index
- **/
-s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
-			       IXGBE_NOT_IMPLEMENTED);
-
-}
-
-/**
- *  ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
- *  @hw: pointer to hardware structure
- *  @vmdq: VMDq default pool index
- **/
-s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
-			       (hw, vmdq), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
- *  @hw: pointer to hardware structure
- *  @rar: receive address register index to disassociate with VMDq index
- *  @vmdq: VMDq set or pool index
- **/
-s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_init_rx_addrs - Initializes receive address filters.
- *  @hw: pointer to hardware structure
- *
- *  Places the MAC address in receive address register 0 and clears the rest
- *  of the receive address registers. Clears the multicast table. Assumes
- *  the receiver is in reset when the routine is called.
- **/
-s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
- *  @hw: pointer to hardware structure
- **/
-u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
-{
-	return hw->mac.num_rar_entries;
-}
-
-/**
- *  ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
- *  @hw: pointer to hardware structure
- *  @addr_list: the list of new secondary (unicast) addresses
- *  @addr_count: number of addresses
- *  @func: iterator function to walk the secondary address list
- *
- *  The given list replaces any existing list. Clears the secondary addrs from
- *  receive address registers. Uses unused receive address registers for the
- *  first secondary addresses, and falls back to promiscuous mode as needed.
- **/
-s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
-			      u32 addr_count, ixgbe_mc_addr_itr func)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
-			       addr_list, addr_count, func),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
- *  @hw: pointer to hardware structure
- *  @mc_addr_list: the list of new multicast addresses
- *  @mc_addr_count: number of addresses
- *  @func: iterator function to walk the multicast address list
- *
- *  The given list replaces any existing list. Clears the MC addrs from receive
- *  address registers and the multicast table. Uses unused receive address
- *  registers for the first multicast addresses, and hashes the rest into the
- *  multicast table.
- **/
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
-			      u32 mc_addr_count, ixgbe_mc_addr_itr func,
-			      bool clear)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
-			       mc_addr_list, mc_addr_count, func, clear),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_enable_mc - Enable multicast address in RAR
- *  @hw: pointer to hardware structure
- *
- *  Enables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_disable_mc - Disable multicast address in RAR
- *  @hw: pointer to hardware structure
- *
- *  Disables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_clear_vfta - Clear VLAN filter table
- *  @hw: pointer to hardware structure
- *
- *  Clears the VLAN filter table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_vfta - Set VLAN filter table
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VFTA
- *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
- *
- *  Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
-			       vlan_on), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_set_vlvf - Set VLAN Pool Filter
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VLVF
- *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
- *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
- *                 should be changed
- *
- *  Turn on/off specified bit in VLVF table.
- **/
-s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
-		    bool *vfta_changed)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
-			       vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_fc_enable - Enable flow control
- *  @hw: pointer to hardware structure
- *
- *  Configures the flow control settings based on SW configuration.
- **/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
- * @hw: pointer to hardware structure
- * @maj: driver major number to be sent to firmware
- * @min: driver minor number to be sent to firmware
- * @build: driver build number to be sent to firmware
- * @ver: driver version number to be sent to firmware
- **/
-s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
-			 u8 ver)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
-			       build, ver), IXGBE_NOT_IMPLEMENTED);
-}
-
-
-/**
- *  ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
- *  @hw: pointer to hardware structure
- *
- *  Updates the temperatures in mac.thermal_sensor_data
- **/
-s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw),
-				IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
- *  @hw: pointer to hardware structure
- *
- *  Inits the thermal sensor thresholds according to the NVM map
- **/
-s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw),
-				IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_dmac_config - Configure DMA Coalescing registers.
- *  @hw: pointer to hardware structure
- *
- *  Configure DMA coalescing. If enabling dmac, dmac is activated.
- *  When disabling dmac, the dmac enable bit is cleared.
- **/
-s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
-				IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
- *  @hw: pointer to hardware structure
- *
- *  Disables dmac, updates per TC settings, and then enables dmac.
- **/
-s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
-				IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
- *  @hw: pointer to hardware structure
- *
- *  Configure DMA coalescing threshold per TC and set high priority bit for
- *  FCOE TC. The dmac enable bit must be cleared before configuring.
- **/
-s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
-				IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_setup_eee - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @enable_eee: boolean flag to enable EEE
- *
- *  Enable/disable EEE based on the enable_eee flag.
- *  Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- *  are modified.
- *
- **/
-s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
-			IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
- * @hw: pointer to hardware structure
- * @enable: enable or disable source address pruning
- * @pool: Rx pool to toggle source address pruning
- **/
-void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
-				      unsigned int pool)
-{
-	if (hw->mac.ops.set_source_address_pruning)
-		hw->mac.ops.set_source_address_pruning(hw, enable, pool);
-}
-
-/**
- *  ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
- *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for Ethertype anti-spoofing
- *  @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
- *
- **/
-void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
-{
-	if (hw->mac.ops.set_ethertype_anti_spoofing)
-		hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
-}
-
-/**
- *  ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit address of PHY register to read
- *  @device_type: type of device you want to communicate with
- *  @phy_data: Pointer to read data from PHY register
- *
- *  Reads a value from a specified PHY register
- **/
-s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			   u32 device_type, u32 *phy_data)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
-			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @device_type: type of device you want to communicate with
- *  @phy_data: Data to write to the PHY register
- *
- *  Writes a value to specified PHY register
- **/
-s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			    u32 device_type, u32 phy_data)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
-			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_disable_mdd - Disable malicious driver detection
- *  @hw: pointer to hardware structure
- *
- **/
-void ixgbe_disable_mdd(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.disable_mdd)
-		hw->mac.ops.disable_mdd(hw);
-}
-
-/**
- *  ixgbe_enable_mdd - Enable malicious driver detection
- *  @hw: pointer to hardware structure
- *
- **/
-void ixgbe_enable_mdd(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.enable_mdd)
-		hw->mac.ops.enable_mdd(hw);
-}
-
-/**
- *  ixgbe_mdd_event - Handle malicious driver detection event
- *  @hw: pointer to hardware structure
- *  @vf_bitmap: vf bitmap of malicious vfs
- *
- **/
-void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
-{
-	if (hw->mac.ops.mdd_event)
-		hw->mac.ops.mdd_event(hw, vf_bitmap);
-}
-
-/**
- *  ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
- *  detection event
- *  @hw: pointer to hardware structure
- *  @vf: vf index
- *
- **/
-void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
-{
-	if (hw->mac.ops.restore_mdd_vf)
-		hw->mac.ops.restore_mdd_vf(hw, vf);
-}
-
-/**
- *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to read
- *  @val: read value
- *
- *  Performs read operation of the analog register specified.
- **/
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
-			       val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_analog_reg8 - Writes 8 bit analog register
- *  @hw: pointer to hardware structure
- *  @reg: analog register to write
- *  @val: value to write
- *
- *  Performs write operation to Atlas analog register specified.
- **/
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
-			       val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
- *  @hw: pointer to hardware structure
- *
- *  Initializes the Unicast Table Arrays to zero on device load.  This
- *  is part of the Rx init addr execution path.
- **/
-s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to read
- *  @dev_addr: I2C bus address to read from
- *  @data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-			u8 *data)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
-			       dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_read_i2c_combined - Perform I2C read combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to read from
- * @reg: I2C device register to read from
- * @val: pointer to location to receive read value
- *
- * Returns an error code on error.
- */
-s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
-			       reg, val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_i2c_byte - Writes 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to write
- *  @dev_addr: I2C bus address to write to
- *  @data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface
- *  at a specified device address.
- **/
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-			 u8 data)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
-			       dev_addr, data), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- * ixgbe_write_i2c_combined - Perform I2C write combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to write to
- * @reg: I2C device register to write to
- * @val: value to write
- *
- * Returns an error code on error.
- */
-s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr,
-			       reg, val), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to write
- *  @eeprom_data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
-			   u8 byte_offset, u8 eeprom_data)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
-			       (hw, byte_offset, eeprom_data),
-			       IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
- *  @eeprom_data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
-{
-	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
-			      (hw, byte_offset, eeprom_data),
-			      IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_get_supported_physical_layer - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
-			       (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
-}
-
-/**
- *  ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
- *  @hw: pointer to hardware structure
- *  @regval: bitfield to write to the Rx DMA register
- *
- *  Enables the Rx DMA unit of the device.
- **/
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
-			       (hw, regval), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_disable_sec_rx_path - Stops the receive data path
- *  @hw: pointer to hardware structure
- *
- *  Stops the receive data path.
- **/
-s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
-				(hw), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_enable_sec_rx_path - Enables the receive data path
- *  @hw: pointer to hardware structure
- *
- *  Enables the receive data path.
- **/
-s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
-				(hw), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to acquire
- *
- *  Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
-{
-	return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
-			       (hw, mask), IXGBE_NOT_IMPLEMENTED);
-}
-
-/**
- *  ixgbe_release_swfw_semaphore - Release SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to release
- *
- *  Releases the SWFW semaphore through SW_FW_SYNC register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
-{
-	if (hw->mac.ops.release_swfw_sync)
-		hw->mac.ops.release_swfw_sync(hw, mask);
-}
-
-
-void ixgbe_disable_rx(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.disable_rx)
-		hw->mac.ops.disable_rx(hw);
-}
-
-void ixgbe_enable_rx(struct ixgbe_hw *hw)
-{
-	if (hw->mac.ops.enable_rx)
-		hw->mac.ops.enable_rx(hw);
-}
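
Every wrapper above follows the same dispatch pattern: it calls the per-device
function pointer installed in hw->mac.ops, hw->phy.ops or hw->eeprom.ops, and
when the pointer was left NULL by the device-specific init it either returns a
default error (typically IXGBE_NOT_IMPLEMENTED) or, for the void helpers,
silently does nothing. A minimal sketch of the ixgbe_call_func helper used
above (the actual macro is defined elsewhere in the base driver and its exact
spelling may differ):

	/* Dispatch sketch: invoke the ops-table entry if the device-specific
	 * init filled it in, otherwise return the supplied error code.
	 */
	#define ixgbe_call_func(hw, func, params, error) \
		((func) != NULL ? (func) params : (error))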
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
deleted file mode 100644
index 8386e29..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
+++ /dev/null
@@ -1,206 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_API_H_
-#define _IXGBE_API_H_
-
-#include "ixgbe_type.h"
-
-void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
-
-s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
-
-extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
-
-s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
-enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
-s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
-
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-		       u16 *phy_data);
-s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-			u16 phy_data);
-
-s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
-s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
-s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
-			 ixgbe_link_speed *speed,
-			 bool *link_up);
-s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
-			       ixgbe_link_speed speed,
-			       bool autoneg_wait_to_complete);
-s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
-void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
-void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
-void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
-s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-		     bool autoneg_wait_to_complete);
-s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-		     bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-				bool *autoneg);
-s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
-			      u16 words, u16 *data);
-s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
-			     u16 words, u16 *data);
-
-s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
-
-s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
-s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-		  u32 enable_addr);
-s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
-s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
-			      u32 addr_count, ixgbe_mc_addr_itr func);
-s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
-			      u32 mc_addr_count, ixgbe_mc_addr_itr func,
-			      bool clear);
-void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
-s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
-s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
-		   u32 vind, bool vlan_on);
-s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-		   bool vlan_on, bool *vfta_changed);
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
-s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
-			 u8 ver);
-s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
-void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
-s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
-				   u16 *firmware_version);
-s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
-					bool cloud_mode);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_hash_dword input,
-					  union ixgbe_atr_hash_dword common,
-					  u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-				    union ixgbe_atr_input *input_mask, bool cloud_mode);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_input *input,
-					  u16 soft_id, u8 queue, bool cloud_mode);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-					  union ixgbe_atr_input *input,
-					  u16 soft_id);
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-					union ixgbe_atr_input *input,
-					union ixgbe_atr_input *mask,
-					u16 soft_id,
-					u8 queue,
-					bool cloud_mode);
-void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-					  union ixgbe_atr_input *mask);
-u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
-				     union ixgbe_atr_hash_dword common);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-			u8 *data);
-s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
-s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
-			 u8 data);
-s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
-s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
-s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
-void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-			 u16 *wwpn_prefix);
-s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
-s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
-s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
-s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
-s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
-void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
-				      unsigned int vf);
-void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
-				       int vf);
-s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			u32 device_type, u32 *phy_data);
-s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			u32 device_type, u32 phy_data);
-void ixgbe_disable_mdd(struct ixgbe_hw *hw);
-void ixgbe_enable_mdd(struct ixgbe_hw *hw);
-void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
-void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
-void ixgbe_disable_rx(struct ixgbe_hw *hw);
-void ixgbe_enable_rx(struct ixgbe_hw *hw);
-
-#endif /* _IXGBE_API_H_ */
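
The prototypes above are the surface that the PMD code under drivers/net/ixgbe
(ixgbe_ethdev.c, ixgbe_fdir.c, ixgbe_rxtx.c) builds on; callers stay on this
generic API and never reach into the per-MAC ops tables directly. A
hypothetical caller, for illustration only:

	/* Query link state through the generic API; any error, including
	 * IXGBE_NOT_IMPLEMENTED from a missing op, is reported as a failure.
	 */
	static int example_query_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
	{
		bool link_up = false;

		if (ixgbe_check_link(hw, speed, &link_up, false) != IXGBE_SUCCESS)
			return -1;

		return link_up ? 1 : 0;
	}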
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
deleted file mode 100644
index d801de3..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
+++ /dev/null
@@ -1,4940 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-#include "ixgbe_dcb.h"
-#include "ixgbe_dcb_82599.h"
-#include "ixgbe_api.h"
-
-STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
-STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
-STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
-STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-					u16 count);
-STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
-STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
-STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
-STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-
-STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
-					 u16 *san_mac_offset);
-STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
-					     u16 words, u16 *data);
-STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
-					      u16 words, u16 *data);
-STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
-						 u16 offset);
-
-/**
- *  ixgbe_init_ops_generic - Inits function ptrs
- *  @hw: pointer to the hardware structure
- *
- *  Initialize the function pointers.
- **/
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	struct ixgbe_mac_info *mac = &hw->mac;
-	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-	DEBUGFUNC("ixgbe_init_ops_generic");
-
-	/* EEPROM */
-	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
-	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
-	if (eec & IXGBE_EEC_PRES) {
-		eeprom->ops.read = ixgbe_read_eerd_generic;
-		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
-	} else {
-		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
-		eeprom->ops.read_buffer =
-				 ixgbe_read_eeprom_buffer_bit_bang_generic;
-	}
-	eeprom->ops.write = ixgbe_write_eeprom_generic;
-	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
-	eeprom->ops.validate_checksum =
-				      ixgbe_validate_eeprom_checksum_generic;
-	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
-	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
-
-	/* MAC */
-	mac->ops.init_hw = ixgbe_init_hw_generic;
-	mac->ops.reset_hw = NULL;
-	mac->ops.start_hw = ixgbe_start_hw_generic;
-	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
-	mac->ops.get_media_type = NULL;
-	mac->ops.get_supported_physical_layer = NULL;
-	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
-	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
-	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
-	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
-	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
-	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
-	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
-	mac->ops.prot_autoc_read = prot_autoc_read_generic;
-	mac->ops.prot_autoc_write = prot_autoc_write_generic;
-
-	/* LEDs */
-	mac->ops.led_on = ixgbe_led_on_generic;
-	mac->ops.led_off = ixgbe_led_off_generic;
-	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
-	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
-
-	/* RAR, Multicast, VLAN */
-	mac->ops.set_rar = ixgbe_set_rar_generic;
-	mac->ops.clear_rar = ixgbe_clear_rar_generic;
-	mac->ops.insert_mac_addr = NULL;
-	mac->ops.set_vmdq = NULL;
-	mac->ops.clear_vmdq = NULL;
-	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
-	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
-	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
-	mac->ops.enable_mc = ixgbe_enable_mc_generic;
-	mac->ops.disable_mc = ixgbe_disable_mc_generic;
-	mac->ops.clear_vfta = NULL;
-	mac->ops.set_vfta = NULL;
-	mac->ops.set_vlvf = NULL;
-	mac->ops.init_uta_tables = NULL;
-	mac->ops.enable_rx = ixgbe_enable_rx_generic;
-	mac->ops.disable_rx = ixgbe_disable_rx_generic;
-
-	/* Flow Control */
-	mac->ops.fc_enable = ixgbe_fc_enable_generic;
-
-	/* Link */
-	mac->ops.get_link_capabilities = NULL;
-	mac->ops.setup_link = NULL;
-	mac->ops.check_link = NULL;
-	mac->ops.dmac_config = NULL;
-	mac->ops.dmac_update_tcs = NULL;
-	mac->ops.dmac_config_tcs = NULL;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
- * of flow control
- * @hw: pointer to hardware structure
- *
- * This function returns true if the device supports flow control
- * autonegotiation, and false if it does not.
- *
- **/
-bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
-{
-	bool supported = false;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
-
-	switch (hw->phy.media_type) {
-	case ixgbe_media_type_fiber_qsfp:
-	case ixgbe_media_type_fiber:
-		hw->mac.ops.check_link(hw, &speed, &link_up, false);
-		/* if link is down, assume supported */
-		if (link_up)
-			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
-				true : false;
-		else
-			supported = true;
-		break;
-	case ixgbe_media_type_backplane:
-		supported = true;
-		break;
-	case ixgbe_media_type_copper:
-		/* only some copper devices support flow control autoneg */
-		switch (hw->device_id) {
-		case IXGBE_DEV_ID_82599_T3_LOM:
-		case IXGBE_DEV_ID_X540T:
-		case IXGBE_DEV_ID_X540T1:
-		case IXGBE_DEV_ID_X550T:
-			supported = true;
-			break;
-		default:
-			supported = false;
-		}
-	default:
-		break;
-	}
-
-	if (!supported)
-		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
-			      "Device %x does not support flow control autoneg",
-			      hw->device_id);
-	return supported;
-}
-
-/**
- *  ixgbe_setup_fc - Set up flow control
- *  @hw: pointer to hardware structure
- *
- *  Called at init time to set up flow control.
- **/
-STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 reg = 0, reg_bp = 0;
-	u16 reg_cu = 0;
-	bool locked = false;
-
-	DEBUGFUNC("ixgbe_setup_fc");
-
-	/* Validate the requested mode */
-	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
-			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/*
-	 * 10gig parts do not have a word in the EEPROM to determine the
-	 * default flow control setting, so we explicitly set it to full.
-	 */
-	if (hw->fc.requested_mode == ixgbe_fc_default)
-		hw->fc.requested_mode = ixgbe_fc_full;
-
-	/*
-	 * Set up the 1G and 10G flow control advertisement registers so the
-	 * HW will be able to do fc autoneg once the cable is plugged in.  If
-	 * we link at 10G, the 1G advertisement is harmless and vice versa.
-	 */
-	switch (hw->phy.media_type) {
-	case ixgbe_media_type_backplane:
-		/* some MAC's need RMW protection on AUTOC */
-		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
-		if (ret_val != IXGBE_SUCCESS)
-			goto out;
-
-		/* only backplane uses autoc so fall through */
-	case ixgbe_media_type_fiber_qsfp:
-	case ixgbe_media_type_fiber:
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-
-		break;
-	case ixgbe_media_type_copper:
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
-		break;
-	default:
-		break;
-	}
-
-	/*
-	 * The possible values of fc.requested_mode are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames,
-	 *    but not send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but
-	 *    we do not support receiving pause frames).
-	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-	 * other: Invalid.
-	 */
-	switch (hw->fc.requested_mode) {
-	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
-				    IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
-		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
-		if (hw->phy.media_type == ixgbe_media_type_backplane) {
-			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
-			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
-		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
-			reg_cu |= IXGBE_TAF_ASM_PAUSE;
-			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
-		}
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE, as such we fall
-		 * through to the fc_full statement.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
-				  IXGBE_AUTOC_ASM_PAUSE;
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
-		break;
-	default:
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
-			     "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-		break;
-	}
-
-	if (hw->mac.type < ixgbe_mac_X540) {
-		/*
-		 * Enable auto-negotiation between the MAC & PHY;
-		 * the MAC will advertise clause 37 flow control.
-		 */
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-		/* Disable AN timeout */
-		if (hw->fc.strict_ieee)
-			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-	}
-
-	/*
-	 * AUTOC restart handles negotiation of 1G and 10G on backplane
-	 * and copper. There is no need to set the PCS1GCTL register.
-	 *
-	 */
-	if (hw->phy.media_type == ixgbe_media_type_backplane) {
-		reg_bp |= IXGBE_AUTOC_AN_RESTART;
-		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
-		if (ret_val)
-			goto out;
-	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-		    (ixgbe_device_supports_autoneg_fc(hw))) {
-		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
-	}
-
-	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware by filling the bus info structure and media type,
- *  clearing all on chip counters, initializing receive address registers,
- *  the multicast table and the VLAN filter table, calling the routine to set
- *  up link and flow control settings, and leaving transmit and receive units
- *  disabled and uninitialized
- **/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
-{
-	s32 ret_val;
-	u32 ctrl_ext;
-
-	DEBUGFUNC("ixgbe_start_hw_generic");
-
-	/* Set the media type */
-	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
-
-	/* PHY ops initialization must be done in reset_hw() */
-
-	/* Clear the VLAN filter table */
-	hw->mac.ops.clear_vfta(hw);
-
-	/* Clear statistics registers */
-	hw->mac.ops.clear_hw_cntrs(hw);
-
-	/* Set No Snoop Disable */
-	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Setup flow control */
-	ret_val = ixgbe_setup_fc(hw);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	/* Clear adapter stopped flag */
-	hw->adapter_stopped = false;
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_start_hw_gen2 - Init sequence for common device family
- *  @hw: pointer to hw structure
- *
- * Performs the init sequence common to the second generation
- * of 10 GbE devices.
- * Devices in the second generation:
- *     82599
- *     X540
- **/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
-{
-	u32 i;
-	u32 regval;
-
-	/* Clear the rate limiters */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
-		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
-	}
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Disable relaxed ordering */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-	}
-
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_hw_generic - Generic hardware initialization
- *  @hw: pointer to hardware structure
- *
- *  Initializes the hardware by resetting it, filling the bus info structure
- *  and media type, clearing all on chip counters, initializing receive
- *  address registers, the multicast table and the VLAN filter table, calling
- *  the routine to set up link and flow control settings, and leaving transmit
- *  and receive units disabled and uninitialized
- **/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_init_hw_generic");
-
-	/* Reset the hardware */
-	status = hw->mac.ops.reset_hw(hw);
-
-	if (status == IXGBE_SUCCESS) {
-		/* Start the HW */
-		status = hw->mac.ops.start_hw(hw);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
- *  @hw: pointer to hardware structure
- *
- *  Clears all hardware statistics counters by reading them from the hardware.
- *  Statistics counters are clear on read.
- **/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
-{
-	u16 i = 0;
-
-	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
-
-	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
-	IXGBE_READ_REG(hw, IXGBE_ERRBC);
-	IXGBE_READ_REG(hw, IXGBE_MSPDC);
-	for (i = 0; i < 8; i++)
-		IXGBE_READ_REG(hw, IXGBE_MPC(i));
-
-	IXGBE_READ_REG(hw, IXGBE_MLFC);
-	IXGBE_READ_REG(hw, IXGBE_MRFC);
-	IXGBE_READ_REG(hw, IXGBE_RLEC);
-	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-	if (hw->mac.type >= ixgbe_mac_82599EB) {
-		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-	} else {
-		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-	}
-
-	for (i = 0; i < 8; i++) {
-		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-		if (hw->mac.type >= ixgbe_mac_82599EB) {
-			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-		} else {
-			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
-		}
-	}
-	if (hw->mac.type >= ixgbe_mac_82599EB)
-		for (i = 0; i < 8; i++)
-			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
-	IXGBE_READ_REG(hw, IXGBE_PRC64);
-	IXGBE_READ_REG(hw, IXGBE_PRC127);
-	IXGBE_READ_REG(hw, IXGBE_PRC255);
-	IXGBE_READ_REG(hw, IXGBE_PRC511);
-	IXGBE_READ_REG(hw, IXGBE_PRC1023);
-	IXGBE_READ_REG(hw, IXGBE_PRC1522);
-	IXGBE_READ_REG(hw, IXGBE_GPRC);
-	IXGBE_READ_REG(hw, IXGBE_BPRC);
-	IXGBE_READ_REG(hw, IXGBE_MPRC);
-	IXGBE_READ_REG(hw, IXGBE_GPTC);
-	IXGBE_READ_REG(hw, IXGBE_GORCL);
-	IXGBE_READ_REG(hw, IXGBE_GORCH);
-	IXGBE_READ_REG(hw, IXGBE_GOTCL);
-	IXGBE_READ_REG(hw, IXGBE_GOTCH);
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		for (i = 0; i < 8; i++)
-			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
-	IXGBE_READ_REG(hw, IXGBE_RUC);
-	IXGBE_READ_REG(hw, IXGBE_RFC);
-	IXGBE_READ_REG(hw, IXGBE_ROC);
-	IXGBE_READ_REG(hw, IXGBE_RJC);
-	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
-	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
-	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
-	IXGBE_READ_REG(hw, IXGBE_TORL);
-	IXGBE_READ_REG(hw, IXGBE_TORH);
-	IXGBE_READ_REG(hw, IXGBE_TPR);
-	IXGBE_READ_REG(hw, IXGBE_TPT);
-	IXGBE_READ_REG(hw, IXGBE_PTC64);
-	IXGBE_READ_REG(hw, IXGBE_PTC127);
-	IXGBE_READ_REG(hw, IXGBE_PTC255);
-	IXGBE_READ_REG(hw, IXGBE_PTC511);
-	IXGBE_READ_REG(hw, IXGBE_PTC1023);
-	IXGBE_READ_REG(hw, IXGBE_PTC1522);
-	IXGBE_READ_REG(hw, IXGBE_MPTC);
-	IXGBE_READ_REG(hw, IXGBE_BPTC);
-	for (i = 0; i < 16; i++) {
-		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-		if (hw->mac.type >= ixgbe_mac_82599EB) {
-			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
-			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
-			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
-			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
-			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-		} else {
-			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
-		}
-	}
-
-	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
-		if (hw->phy.id == 0)
-			ixgbe_identify_phy(hw);
-		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
- *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number string from the EEPROM
- *  @pba_num_size: part number string buffer length
- *
- *  Reads the part number string from the EEPROM.
- **/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-				  u32 pba_num_size)
-{
-	s32 ret_val;
-	u16 data;
-	u16 pba_ptr;
-	u16 offset;
-	u16 length;
-
-	DEBUGFUNC("ixgbe_read_pba_string_generic");
-
-	if (pba_num == NULL) {
-		DEBUGOUT("PBA string buffer was null\n");
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
-	if (ret_val) {
-		DEBUGOUT("NVM Read Error\n");
-		return ret_val;
-	}
-
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
-	if (ret_val) {
-		DEBUGOUT("NVM Read Error\n");
-		return ret_val;
-	}
-
-	/*
-	 * If data is not the pointer guard, the PBA must be in legacy format,
-	 * which means pba_ptr is actually our second data word for the PBA
-	 * number and we can decode it into an ASCII string.
-	 */
-	if (data != IXGBE_PBANUM_PTR_GUARD) {
-		DEBUGOUT("NVM PBA number is not stored as string\n");
-
-		/* we will need 11 characters to store the PBA */
-		if (pba_num_size < 11) {
-			DEBUGOUT("PBA string buffer too small\n");
-			return IXGBE_ERR_NO_SPACE;
-		}
-
-		/* extract hex string from data and pba_ptr */
-		pba_num[0] = (data >> 12) & 0xF;
-		pba_num[1] = (data >> 8) & 0xF;
-		pba_num[2] = (data >> 4) & 0xF;
-		pba_num[3] = data & 0xF;
-		pba_num[4] = (pba_ptr >> 12) & 0xF;
-		pba_num[5] = (pba_ptr >> 8) & 0xF;
-		pba_num[6] = '-';
-		pba_num[7] = 0;
-		pba_num[8] = (pba_ptr >> 4) & 0xF;
-		pba_num[9] = pba_ptr & 0xF;
-
-		/* put a null character on the end of our string */
-		pba_num[10] = '\0';
-
-		/* switch all the data but the '-' to hex char */
-		for (offset = 0; offset < 10; offset++) {
-			if (pba_num[offset] < 0xA)
-				pba_num[offset] += '0';
-			else if (pba_num[offset] < 0x10)
-				pba_num[offset] += 'A' - 0xA;
-		}
-
-		return IXGBE_SUCCESS;
-	}
-
-	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
-	if (ret_val) {
-		DEBUGOUT("NVM Read Error\n");
-		return ret_val;
-	}
-
-	if (length == 0xFFFF || length == 0) {
-		DEBUGOUT("NVM PBA number section invalid length\n");
-		return IXGBE_ERR_PBA_SECTION;
-	}
-
-	/* check if pba_num buffer is big enough */
-	if (pba_num_size  < (((u32)length * 2) - 1)) {
-		DEBUGOUT("PBA string buffer too small\n");
-		return IXGBE_ERR_NO_SPACE;
-	}
-
-	/* trim pba length from start of string */
-	pba_ptr++;
-	length--;
-
-	for (offset = 0; offset < length; offset++) {
-		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
-		if (ret_val) {
-			DEBUGOUT("NVM Read Error\n");
-			return ret_val;
-		}
-		pba_num[offset * 2] = (u8)(data >> 8);
-		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
-	}
-	pba_num[offset * 2] = '\0';
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
- *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number from the EEPROM
- *
- *  Reads the part number from the EEPROM.
- **/
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
-{
-	s32 ret_val;
-	u16 data;
-
-	DEBUGFUNC("ixgbe_read_pba_num_generic");
-
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
-	if (ret_val) {
-		DEBUGOUT("NVM Read Error\n");
-		return ret_val;
-	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
-		DEBUGOUT("NVM Not supported\n");
-		return IXGBE_NOT_IMPLEMENTED;
-	}
-	*pba_num = (u32)(data << 16);
-
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
-	if (ret_val) {
-		DEBUGOUT("NVM Read Error\n");
-		return ret_val;
-	}
-	*pba_num |= data;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @max_pba_block_size: PBA block size limit
- *  @pba: pointer to output PBA structure
- *
- *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
- *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct ixgbe_pba *pba)
-{
-	s32 ret_val;
-	u16 pba_block_size;
-
-	if (pba == NULL)
-		return IXGBE_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
-						     &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
-			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
-			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
-		} else {
-			return IXGBE_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return IXGBE_ERR_PARAM;
-
-		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
-						   eeprom_buf_size,
-						   &pba_block_size);
-		if (ret_val)
-			return ret_val;
-
-		if (pba_block_size > max_pba_block_size)
-			return IXGBE_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
-							     pba_block_size,
-							     pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba_block_size)) {
-				memcpy(pba->pba_block,
-				       &eeprom_buf[pba->word[1]],
-				       pba_block_size * sizeof(u16));
-			} else {
-				return IXGBE_ERR_PARAM;
-			}
-		}
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_write_pba_raw
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @pba: pointer to PBA structure
- *
- *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
- *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
- *
- **/
-s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct ixgbe_pba *pba)
-{
-	s32 ret_val;
-
-	if (pba == NULL)
-		return IXGBE_ERR_PARAM;
-
-	if (eeprom_buf == NULL) {
-		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
-						      &pba->word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
-			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
-			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
-		} else {
-			return IXGBE_ERR_PARAM;
-		}
-	}
-
-	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
-		if (pba->pba_block == NULL)
-			return IXGBE_ERR_PARAM;
-
-		if (eeprom_buf == NULL) {
-			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
-							      pba->pba_block[0],
-							      pba->pba_block);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > (u32)(pba->word[1] +
-					      pba->pba_block[0])) {
-				memcpy(&eeprom_buf[pba->word[1]],
-				       pba->pba_block,
-				       pba->pba_block[0] * sizeof(u16));
-			} else {
-				return IXGBE_ERR_PARAM;
-			}
-		}
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_pba_block_size
- *  @hw: pointer to the HW structure
- *  @eeprom_buf: optional pointer to EEPROM image
- *  @eeprom_buf_size: size of EEPROM image in words
- *  @pba_block_size: pointer to output variable
- *
- *  Returns the size of the PBA block in words. The function operates on the
- *  EEPROM image if the eeprom_buf pointer is not NULL; otherwise it accesses
- *  the physical EEPROM device.
- *
- **/
-s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
-			     u32 eeprom_buf_size, u16 *pba_block_size)
-{
-	s32 ret_val;
-	u16 pba_word[2];
-	u16 length;
-
-	DEBUGFUNC("ixgbe_get_pba_block_size");
-
-	if (eeprom_buf == NULL) {
-		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
-						     &pba_word[0]);
-		if (ret_val)
-			return ret_val;
-	} else {
-		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
-			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
-			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
-		} else {
-			return IXGBE_ERR_PARAM;
-		}
-	}
-
-	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
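-		/* the first word of the PBA block holds its length in words */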
-		if (eeprom_buf == NULL) {
-			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
-						      &length);
-			if (ret_val)
-				return ret_val;
-		} else {
-			if (eeprom_buf_size > pba_word[1])
-				length = eeprom_buf[pba_word[1] + 0];
-			else
-				return IXGBE_ERR_PARAM;
-		}
-
-		if (length == 0xFFFF || length == 0)
-			return IXGBE_ERR_PBA_SECTION;
-	} else {
-		/* PBA number in legacy format, there is no PBA Block. */
-		length = 0;
-	}
-
-	if (pba_block_size != NULL)
-		*pba_block_size = length;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_mac_addr_generic - Generic get MAC address
- *  @hw: pointer to hardware structure
- *  @mac_addr: Adapter MAC address
- *
- *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
- *  A reset of the adapter must be performed prior to calling this function
- *  in order for the MAC address to have been loaded from the EEPROM into RAR0
- **/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
-{
-	u32 rar_high;
-	u32 rar_low;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_get_mac_addr_generic");
-
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
-	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
-
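-	/*
-	 * RAL(0) holds the low four address bytes and RAH(0) the high two, in
-	 * little-endian order; e.g. RAL = 0xDDCCBBAA and RAH = 0x0000FFEE
-	 * (illustrative values) yield the MAC address AA:BB:CC:DD:EE:FF.
-	 */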
-	for (i = 0; i < 4; i++)
-		mac_addr[i] = (u8)(rar_low >> (i*8));
-
-	for (i = 0; i < 2; i++)
-		mac_addr[i+4] = (u8)(rar_high >> (i*8));
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
- *  @hw: pointer to hardware structure
- *  @link_status: the link status returned by the PCI config space
- *
- *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
- **/
-void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-
-	if (hw->bus.type == ixgbe_bus_type_unknown)
-		hw->bus.type = ixgbe_bus_type_pci_express;
-
-	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
-	case IXGBE_PCI_LINK_WIDTH_1:
-		hw->bus.width = ixgbe_bus_width_pcie_x1;
-		break;
-	case IXGBE_PCI_LINK_WIDTH_2:
-		hw->bus.width = ixgbe_bus_width_pcie_x2;
-		break;
-	case IXGBE_PCI_LINK_WIDTH_4:
-		hw->bus.width = ixgbe_bus_width_pcie_x4;
-		break;
-	case IXGBE_PCI_LINK_WIDTH_8:
-		hw->bus.width = ixgbe_bus_width_pcie_x8;
-		break;
-	default:
-		hw->bus.width = ixgbe_bus_width_unknown;
-		break;
-	}
-
-	switch (link_status & IXGBE_PCI_LINK_SPEED) {
-	case IXGBE_PCI_LINK_SPEED_2500:
-		hw->bus.speed = ixgbe_bus_speed_2500;
-		break;
-	case IXGBE_PCI_LINK_SPEED_5000:
-		hw->bus.speed = ixgbe_bus_speed_5000;
-		break;
-	case IXGBE_PCI_LINK_SPEED_8000:
-		hw->bus.speed = ixgbe_bus_speed_8000;
-		break;
-	default:
-		hw->bus.speed = ixgbe_bus_speed_unknown;
-		break;
-	}
-
-	mac->ops.set_lan_id(hw);
-}
-
-/**
- *  ixgbe_get_bus_info_generic - Generic set PCI bus info
- *  @hw: pointer to hardware structure
- *
- *  Gets the PCI bus info (speed, width, type) then calls helper function to
- *  store this data within the ixgbe_hw structure.
- **/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
-{
-	u16 link_status;
-
-	DEBUGFUNC("ixgbe_get_bus_info_generic");
-
-	/* Get the negotiated link width and speed from PCI config space */
-	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
-
-	ixgbe_set_pci_config_data_generic(hw, link_status);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
- *  @hw: pointer to the HW structure
- *
- *  Determines the LAN function id by reading memory-mapped registers
- *  and swaps the port value if requested.
- **/
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
-{
-	struct ixgbe_bus_info *bus = &hw->bus;
-	u32 reg;
-
-	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
-
-	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
-	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
-	bus->lan_id = bus->func;
-
-	/* check for a port swap */
-	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
-	if (reg & IXGBE_FACTPS_LFS)
-		bus->func ^= 0x1;
-}
-
-/**
- *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
- *  @hw: pointer to hardware structure
- *
- *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- *  disables transmit and receive units. The adapter_stopped flag is used by
- *  the shared code and drivers to determine if the adapter is in a stopped
- *  state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
-{
-	u32 reg_val;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_stop_adapter_generic");
-
-	/*
-	 * Set the adapter_stopped flag so other driver functions stop touching
-	 * the hardware
-	 */
-	hw->adapter_stopped = true;
-
-	/* Disable the receive unit */
-	ixgbe_disable_rx(hw);
-
-	/* Clear interrupt mask to stop interrupts from being generated */
-	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
-
-	/* Clear any pending interrupts, flush previous writes */
-	IXGBE_READ_REG(hw, IXGBE_EICR);
-
-	/* Disable the transmit unit.  Each queue must be disabled. */
-	for (i = 0; i < hw->mac.max_tx_queues; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
-
-	/* Disable the receive unit by stopping each queue */
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
-		reg_val &= ~IXGBE_RXDCTL_ENABLE;
-		reg_val |= IXGBE_RXDCTL_SWFLSH;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
-	}
-
-	/* flush all queues disables */
-	IXGBE_WRITE_FLUSH(hw);
-	msec_delay(2);
-
-	/*
-	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
-	 * access and verify no pending requests
-	 */
-	return ixgbe_disable_pcie_master(hw);
-}
-
-/**
- *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
- *  @hw: pointer to hardware structure
- *  @index: led number to turn on
- **/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
-{
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-	DEBUGFUNC("ixgbe_led_on_generic");
-
-	/* To turn on the LED, set mode to ON. */
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
- *  @hw: pointer to hardware structure
- *  @index: led number to turn off
- **/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
-{
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-	DEBUGFUNC("ixgbe_led_off_generic");
-
-	/* To turn off the LED, set mode to OFF. */
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
- *  @hw: pointer to hardware structure
- *
- *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
- *  ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	u32 eec;
-	u16 eeprom_size;
-
-	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
-
-	if (eeprom->type == ixgbe_eeprom_uninitialized) {
-		eeprom->type = ixgbe_eeprom_none;
-		/* Set default semaphore delay to 10ms which is a well
-		 * tested value */
-		eeprom->semaphore_delay = 10;
-		/* Clear EEPROM page size, it will be initialized as needed */
-		eeprom->word_page_size = 0;
-
-		/*
-		 * Check for EEPROM present first.
-		 * If not present leave as none
-		 */
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-		if (eec & IXGBE_EEC_PRES) {
-			eeprom->type = ixgbe_eeprom_spi;
-
-			/*
-			 * SPI EEPROM is assumed here.  This code would need to
-			 * change if a future EEPROM is not SPI.
-			 */
-			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-					    IXGBE_EEC_SIZE_SHIFT);
-			eeprom->word_size = 1 << (eeprom_size +
-					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
-		}
-
-		if (eec & IXGBE_EEC_ADDR_SIZE)
-			eeprom->address_bits = 16;
-		else
-			eeprom->address_bits = 8;
-		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
-			  "%d\n", eeprom->type, eeprom->word_size,
-			  eeprom->address_bits);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to write
- *  @words: number of word(s)
- *  @data: 16 bit word(s) to write to EEPROM
- *
- *  Writes 16 bit word(s) to EEPROM through bit-bang method
- **/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-					       u16 words, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 i, count;
-
-	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
-
-	if (offset + words > hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
-
-	/*
-	 * The EEPROM page size cannot be queried from the chip. We do lazy
-	 * initialization. It is worth doing when we write a large buffer.
-	 */
-	if ((hw->eeprom.word_page_size == 0) &&
-	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
-		ixgbe_detect_eeprom_page_size_generic(hw, offset);
-
-	/*
-	 * We cannot hold the synchronization semaphores for too long, to
-	 * avoid starving other entities. However, it is more efficient to
-	 * write in bursts than to synchronize access for each word.
-	 */
-	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
-		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
-			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
-		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
-							    count, &data[i]);
-
-		if (status != IXGBE_SUCCESS)
-			break;
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be written to
- *  @words: number of word(s)
- *  @data: 16 bit word(s) to be written to the EEPROM
- *
- *  If ixgbe_eeprom_update_checksum is not called after this function, the
- *  EEPROM will most likely contain an invalid checksum.
- **/
-STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
-					      u16 words, u16 *data)
-{
-	s32 status;
-	u16 word;
-	u16 page_size;
-	u16 i;
-	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
-
-	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
-
-	/* Prepare the EEPROM for writing  */
-	status = ixgbe_acquire_eeprom(hw);
-
-	if (status == IXGBE_SUCCESS) {
-		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
-			ixgbe_release_eeprom(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	}
-
-	if (status == IXGBE_SUCCESS) {
-		for (i = 0; i < words; i++) {
-			ixgbe_standby_eeprom(hw);
-
-			/*  Send the WRITE ENABLE command (8 bit opcode )  */
-			ixgbe_shift_out_eeprom_bits(hw,
-						   IXGBE_EEPROM_WREN_OPCODE_SPI,
-						   IXGBE_EEPROM_OPCODE_BITS);
-
-			ixgbe_standby_eeprom(hw);
-
-			/*
-			 * Some SPI eeproms use the 8th address bit embedded
-			 * in the opcode
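-			 * (word offset 128 is byte address 256, which no
-			 * longer fits in the 8-bit address field)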
-			 */
-			if ((hw->eeprom.address_bits == 8) &&
-			    ((offset + i) >= 128))
-				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-			/* Send the Write command (8-bit opcode + addr) */
-			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
-						    IXGBE_EEPROM_OPCODE_BITS);
-			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-						    hw->eeprom.address_bits);
-
-			page_size = hw->eeprom.word_page_size;
-
-			/* Send the data in burst via SPI*/
-			do {
-				word = data[i];
-				word = (word >> 8) | (word << 8);
-				ixgbe_shift_out_eeprom_bits(hw, word, 16);
-
-				if (page_size == 0)
-					break;
-
-				/* do not wrap around page */
-				if (((offset + i) & (page_size - 1)) ==
-				    (page_size - 1))
-					break;
-			} while (++i < words);
-
-			ixgbe_standby_eeprom(hw);
-			msec_delay(10);
-		}
-		/* Done with writing - release the EEPROM */
-		ixgbe_release_eeprom(hw);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be written to
- *  @data: 16 bit word to be written to the EEPROM
- *
- *  If ixgbe_eeprom_update_checksum is not called after this function, the
- *  EEPROM will most likely contain an invalid checksum.
- **/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_write_eeprom_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
-
-	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be read
- *  @words: number of word(s)
- *  @data: read 16 bit word(s) from EEPROM
- *
- *  Reads 16 bit word(s) from EEPROM through bit-bang method
- **/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-					      u16 words, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 i, count;
-
-	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		goto out;
-	}
-
-	if (offset + words > hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
-
-	/*
-	 * We cannot hold the synchronization semaphores for too long, to
-	 * avoid starving other entities. However, it is more efficient to
-	 * read in bursts than to synchronize access for each word.
-	 */
-	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
-		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
-			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
-
-		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
-							   count, &data[i]);
-
-		if (status != IXGBE_SUCCESS)
-			break;
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be read
- *  @words: number of word(s)
- *  @data: read 16 bit word(s) from EEPROM
- *
- *  Reads 16 bit word(s) from EEPROM through bit-bang method
- **/
-STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
-					     u16 words, u16 *data)
-{
-	s32 status;
-	u16 word_in;
-	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
-
-	/* Prepare the EEPROM for reading  */
-	status = ixgbe_acquire_eeprom(hw);
-
-	if (status == IXGBE_SUCCESS) {
-		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
-			ixgbe_release_eeprom(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	}
-
-	if (status == IXGBE_SUCCESS) {
-		for (i = 0; i < words; i++) {
-			ixgbe_standby_eeprom(hw);
-			/*
-			 * Some SPI eeproms use the 8th address bit embedded
-			 * in the opcode
-			 */
-			if ((hw->eeprom.address_bits == 8) &&
-			    ((offset + i) >= 128))
-				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-			/* Send the READ command (opcode + addr) */
-			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
-						    IXGBE_EEPROM_OPCODE_BITS);
-			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-						    hw->eeprom.address_bits);
-
-			/* Read the data. */
-			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
-			data[i] = (word_in >> 8) | (word_in << 8);
-		}
-
-		/* End this read operation */
-		ixgbe_release_eeprom(hw);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be read
- *  @data: read 16 bit value from EEPROM
- *
- *  Reads 16 bit value from EEPROM through bit-bang method
- **/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-				       u16 *data)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		goto out;
-	}
-
-	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of word(s)
- *  @data: 16 bit word(s) from the EEPROM
- *
- *  Reads 16 bit word(s) from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
-				   u16 words, u16 *data)
-{
-	u32 eerd;
-	s32 status = IXGBE_SUCCESS;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
-		goto out;
-	}
-
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
-		goto out;
-	}
-
-	for (i = 0; i < words; i++) {
-		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
-		       IXGBE_EEPROM_RW_REG_START;
-
-		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
-		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
-
-		if (status == IXGBE_SUCCESS) {
-			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
-				   IXGBE_EEPROM_RW_REG_DATA);
-		} else {
-			DEBUGOUT("Eeprom read timed out\n");
-			goto out;
-		}
-	}
-out:
-	return status;
-}
-
-/**
- *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
- *  @hw: pointer to hardware structure
- *  @offset: offset within the EEPROM to be used as a scratch pad
- *
- *  Discover the EEPROM page size by writing marching data at a given offset.
- *  This function is called only when we are writing a new large buffer at
- *  that offset, so the data would be overwritten anyway.
- **/
-STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
-						 u16 offset)
-{
-	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
-	s32 status = IXGBE_SUCCESS;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
-
-	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
-		data[i] = i;
-
-	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
-	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
-					     IXGBE_EEPROM_PAGE_SIZE_MAX, data);
-	hw->eeprom.word_page_size = 0;
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	/*
-	 * When writing bursts larger than the actual page size, the EEPROM
-	 * address wraps around the current page.
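-	 * For example, with IXGBE_EEPROM_PAGE_SIZE_MAX at 128 words, a real
-	 * page size of 32 words and a page-aligned offset, the word at
-	 * "offset" reads back as 96, giving 128 - 96 = 32 words per page.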
-	 */
-	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
-
-	DEBUGOUT1("Detected EEPROM page size = %d words.",
-		  hw->eeprom.word_page_size);
-out:
-	return status;
-}
-
-/**
- *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
-	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
-}
-
-/**
- *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @words: number of word(s)
- *  @data: word(s) write to the EEPROM
- *
- *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
-				    u16 words, u16 *data)
-{
-	u32 eewr;
-	s32 status = IXGBE_SUCCESS;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_write_eewr_buffer_generic");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (words == 0) {
-		status = IXGBE_ERR_INVALID_ARGUMENT;
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
-		goto out;
-	}
-
-	if (offset >= hw->eeprom.word_size) {
-		status = IXGBE_ERR_EEPROM;
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
-		goto out;
-	}
-
-	for (i = 0; i < words; i++) {
-		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
-			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
-			IXGBE_EEPROM_RW_REG_START;
-
-		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-		if (status != IXGBE_SUCCESS) {
-			DEBUGOUT("Eeprom write EEWR timed out\n");
-			goto out;
-		}
-
-		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
-
-		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-		if (status != IXGBE_SUCCESS) {
-			DEBUGOUT("Eeprom write EEWR timed out\n");
-			goto out;
-		}
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @data: word write to the EEPROM
- *
- *  Write a 16 bit word to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
-	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
-}
-
-/**
- *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
- *  @hw: pointer to hardware structure
- *  @ee_reg: EEPROM flag for polling
- *
- *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
- *  read or write is done respectively.
- **/
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
-{
-	u32 i;
-	u32 reg;
-	s32 status = IXGBE_ERR_EEPROM;
-
-	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
-
-	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
-		if (ee_reg == IXGBE_NVM_POLL_READ)
-			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
-		else
-			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
-
-		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
-			status = IXGBE_SUCCESS;
-			break;
-		}
-		usec_delay(5);
-	}
-
-	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "EEPROM read/write done polling timed out");
-
-	return status;
-}
-
-/**
- *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
- *  @hw: pointer to hardware structure
- *
- *  Prepares EEPROM for access using bit-bang method. This function should
- *  be called before issuing a command to the EEPROM.
- **/
-STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 eec;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_acquire_eeprom");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
-	    != IXGBE_SUCCESS)
-		status = IXGBE_ERR_SWFW_SYNC;
-
-	if (status == IXGBE_SUCCESS) {
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-		/* Request EEPROM Access */
-		eec |= IXGBE_EEC_REQ;
-		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-
-		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
-			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-			if (eec & IXGBE_EEC_GNT)
-				break;
-			usec_delay(5);
-		}
-
-		/* Release if grant not acquired */
-		if (!(eec & IXGBE_EEC_GNT)) {
-			eec &= ~IXGBE_EEC_REQ;
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-			DEBUGOUT("Could not acquire EEPROM grant\n");
-
-			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-			status = IXGBE_ERR_EEPROM;
-		}
-
-		/* Setup EEPROM for Read/Write */
-		if (status == IXGBE_SUCCESS) {
-			/* Clear CS and SK */
-			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-			IXGBE_WRITE_FLUSH(hw);
-			usec_delay(1);
-		}
-	}
-	return status;
-}
-
-/**
- *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
- *  @hw: pointer to hardware structure
- *
- *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
- **/
-STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_EEPROM;
-	u32 timeout = 2000;
-	u32 i;
-	u32 swsm;
-
-	DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
-
-	/* Get SMBI software semaphore between device drivers first */
-	for (i = 0; i < timeout; i++) {
-		/*
-		 * If the SMBI bit is 0 when we read it, then the bit will be
-		 * set and we have the semaphore
-		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI)) {
-			status = IXGBE_SUCCESS;
-			break;
-		}
-		usec_delay(50);
-	}
-
-	if (i == timeout) {
-		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
-			 "not granted.\n");
-		/*
-		 * this release is particularly important because our attempts
-		 * above to get the semaphore may have succeeded, and if there
-		 * was a timeout, we should unconditionally clear the semaphore
-		 * bits to free the driver to make progress
-		 */
-		ixgbe_release_eeprom_semaphore(hw);
-
-		usec_delay(50);
-		/*
-		 * one last try
-		 * If the SMBI bit is 0 when we read it, then the bit will be
-		 * set and we have the semaphore
-		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI))
-			status = IXGBE_SUCCESS;
-	}
-
-	/* Now get the semaphore between SW/FW through the SWESMBI bit */
-	if (status == IXGBE_SUCCESS) {
-		for (i = 0; i < timeout; i++) {
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-
-			/* Set the SW EEPROM semaphore bit to request access */
-			swsm |= IXGBE_SWSM_SWESMBI;
-			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
-			/*
-			 * If we set the bit successfully then we got the
-			 * semaphore.
-			 */
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-			if (swsm & IXGBE_SWSM_SWESMBI)
-				break;
-
-			usec_delay(50);
-		}
-
-		/*
-		 * Release semaphores and return error if SW EEPROM semaphore
-		 * was not granted because we don't have access to the EEPROM
-		 */
-		if (i >= timeout) {
-			ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			    "SWESMBI Software EEPROM semaphore not granted.\n");
-			ixgbe_release_eeprom_semaphore(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	} else {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "Software semaphore SMBI between device drivers "
-			     "not granted.\n");
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
- *  @hw: pointer to hardware structure
- *
- *  This function clears hardware semaphore bits.
- **/
-STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
-{
-	u32 swsm;
-
-	DEBUGFUNC("ixgbe_release_eeprom_semaphore");
-
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-
-	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
-	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
-	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- *  ixgbe_ready_eeprom - Polls for EEPROM ready
- *  @hw: pointer to hardware structure
- **/
-STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 i;
-	u8 spi_stat_reg;
-
-	DEBUGFUNC("ixgbe_ready_eeprom");
-
-	/*
-	 * Read "Status Register" repeatedly until the LSB is cleared.  The
-	 * EEPROM will signal that the command has been completed by clearing
-	 * bit 0 of the internal status register.  If it's not cleared within
-	 * 5 milliseconds, then error out.
-	 */
-	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
-		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
-					    IXGBE_EEPROM_OPCODE_BITS);
-		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
-		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
-			break;
-
-		usec_delay(5);
-		ixgbe_standby_eeprom(hw);
-	}
-
-	/*
-	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
-	 * devices (and only 0-5mSec on 5V devices)
-	 */
-	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
-		DEBUGOUT("SPI EEPROM Status error\n");
-		status = IXGBE_ERR_EEPROM;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
- *  @hw: pointer to hardware structure
- **/
-STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
-{
-	u32 eec;
-
-	DEBUGFUNC("ixgbe_standby_eeprom");
-
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-	/* Toggle CS to flush commands */
-	eec |= IXGBE_EEC_CS;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(1);
-	eec &= ~IXGBE_EEC_CS;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(1);
-}
-
-/**
- *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
- *  @hw: pointer to hardware structure
- *  @data: data to send to the EEPROM
- *  @count: number of bits to shift out
- **/
-STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
-					u16 count)
-{
-	u32 eec;
-	u32 mask;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
-
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-	/*
-	 * Mask is used to shift "count" bits of "data" out to the EEPROM
-	 * one bit at a time.  Determine the starting bit based on count
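-	 * (e.g. data = 0xA5 with count = 8 starts with mask = 0x80 and clocks
-	 * the bits out MSB first: 1,0,1,0,0,1,0,1)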
-	 */
-	mask = 0x01 << (count - 1);
-
-	for (i = 0; i < count; i++) {
-		/*
-		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
-		 * "1", and then raising and then lowering the clock (the SK
-		 * bit controls the clock input to the EEPROM).  A "0" is
-		 * shifted out to the EEPROM by setting "DI" to "0" and then
-		 * raising and then lowering the clock.
-		 */
-		if (data & mask)
-			eec |= IXGBE_EEC_DI;
-		else
-			eec &= ~IXGBE_EEC_DI;
-
-		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-		IXGBE_WRITE_FLUSH(hw);
-
-		usec_delay(1);
-
-		ixgbe_raise_eeprom_clk(hw, &eec);
-		ixgbe_lower_eeprom_clk(hw, &eec);
-
-		/*
-		 * Shift mask to signify next bit of data to shift in to the
-		 * EEPROM
-		 */
-		mask = mask >> 1;
-	}
-
-	/* We leave the "DI" bit set to "0" when we leave this routine. */
-	eec &= ~IXGBE_EEC_DI;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
- *  @hw: pointer to hardware structure
- *  @count: number of bits to shift in
- **/
-STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
-{
-	u32 eec;
-	u32 i;
-	u16 data = 0;
-
-	DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
-
-	/*
-	 * In order to read a register from the EEPROM, we need to shift
-	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
-	 * the clock input to the EEPROM (setting the SK bit), and then reading
-	 * the value of the "DO" bit.  During this "shifting in" process the
-	 * "DI" bit should always be clear.
-	 */
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
-
-	for (i = 0; i < count; i++) {
-		data = data << 1;
-		ixgbe_raise_eeprom_clk(hw, &eec);
-
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-		eec &= ~(IXGBE_EEC_DI);
-		if (eec & IXGBE_EEC_DO)
-			data |= 1;
-
-		ixgbe_lower_eeprom_clk(hw, &eec);
-	}
-
-	return data;
-}
-
-/**
- *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
- *  @hw: pointer to hardware structure
- *  @eec: EEC register's current value
- **/
-STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
-{
-	DEBUGFUNC("ixgbe_raise_eeprom_clk");
-
-	/*
-	 * Raise the clock input to the EEPROM
-	 * (setting the SK bit), then delay
-	 */
-	*eec = *eec | IXGBE_EEC_SK;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(1);
-}
-
-/**
- *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
- *  @hw: pointer to hardware structure
- *  @eec: EEC register's current value
- **/
-STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
-{
-	DEBUGFUNC("ixgbe_lower_eeprom_clk");
-
-	/*
-	 * Lower the clock input to the EEPROM (clearing the SK bit), then
-	 * delay
-	 */
-	*eec = *eec & ~IXGBE_EEC_SK;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(1);
-}
-
-/**
- *  ixgbe_release_eeprom - Release EEPROM, release semaphores
- *  @hw: pointer to hardware structure
- **/
-STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
-{
-	u32 eec;
-
-	DEBUGFUNC("ixgbe_release_eeprom");
-
-	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-	eec |= IXGBE_EEC_CS;  /* Pull CS high */
-	eec &= ~IXGBE_EEC_SK; /* Lower SCK */
-
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-	IXGBE_WRITE_FLUSH(hw);
-
-	usec_delay(1);
-
-	/* Stop requesting EEPROM access */
-	eec &= ~IXGBE_EEC_REQ;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
-	/* Delay before attempt to obtain semaphore again to allow FW access */
-	msec_delay(hw->eeprom.semaphore_delay);
-}
-
-/**
- *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
- *  @hw: pointer to hardware structure
- *
- *  Returns a negative error code on error, or the 16-bit checksum
- **/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
-{
-	u16 i;
-	u16 j;
-	u16 checksum = 0;
-	u16 length = 0;
-	u16 pointer = 0;
-	u16 word = 0;
-
-	DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
-
-	/* Include 0x0-0x3F in the checksum */
-	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
-		if (hw->eeprom.ops.read(hw, i, &word)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-		checksum += word;
-	}
-
-	/* Include all data from pointers except for the fw pointer */
-	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
-		if (hw->eeprom.ops.read(hw, i, &pointer)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-
-		/* If the pointer seems invalid */
-		if (pointer == 0xFFFF || pointer == 0)
-			continue;
-
-		if (hw->eeprom.ops.read(hw, pointer, &length)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-
-		if (length == 0xFFFF || length == 0)
-			continue;
-
-		for (j = pointer + 1; j <= pointer + length; j++) {
-			if (hw->eeprom.ops.read(hw, j, &word)) {
-				DEBUGOUT("EEPROM read failed\n");
-				return IXGBE_ERR_EEPROM;
-			}
-			checksum += word;
-		}
-	}
-
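-	/*
-	 * The words summed above plus the stored checksum word should add up
-	 * to IXGBE_EEPROM_SUM, so the expected checksum is the difference.
-	 */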
-	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
-	return (s32)checksum;
-}
-
-/**
- *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
- *  @hw: pointer to hardware structure
- *  @checksum_val: calculated checksum
- *
- *  Performs checksum calculation and validates the EEPROM checksum.  If the
- *  caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-					   u16 *checksum_val)
-{
-	s32 status;
-	u16 checksum;
-	u16 read_checksum = 0;
-
-	DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	/* Verify read checksum from EEPROM is the same as
-	 * calculated checksum
-	 */
-	if (read_checksum != checksum)
-		status = IXGBE_ERR_EEPROM_CHECKSUM;
-
-	/* If the user cares, return the calculated checksum */
-	if (checksum_val)
-		*checksum_val = checksum;
-
-	return status;
-}
-
-/**
- *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u16 checksum;
-
-	DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
-
-	return status;
-}
-
-/**
- *  ixgbe_validate_mac_addr - Validate MAC address
- *  @mac_addr: pointer to MAC address.
- *
- *  Tests a MAC address to ensure it is a valid Individual Address
- **/
-s32 ixgbe_validate_mac_addr(u8 *mac_addr)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_validate_mac_addr");
-
-	/* Make sure it is not a multicast address */
-	if (IXGBE_IS_MULTICAST(mac_addr)) {
-		DEBUGOUT("MAC address is multicast\n");
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
-	/* Not a broadcast address */
-	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
-		DEBUGOUT("MAC address is broadcast\n");
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
-	/* Reject the zero address */
-	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
-		DEBUGOUT("MAC address is all zeros\n");
-		status = IXGBE_ERR_INVALID_MAC_ADDR;
-	}
-	return status;
-}
-
-/**
- *  ixgbe_set_rar_generic - Set Rx address register
- *  @hw: pointer to hardware structure
- *  @index: Receive address register to write
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq "set" or "pool" index
- *  @enable_addr: set flag that address is active
- *
- *  Puts an ethernet address into a receive address register.
- **/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-			  u32 enable_addr)
-{
-	u32 rar_low, rar_high;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_set_rar_generic");
-
-	/* Make sure we are using a valid rar index range */
-	if (index >= rar_entries) {
-		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
-			     "RAR index %d is out of range.\n", index);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	/* setup VMDq pool selection before this RAR gets enabled */
-	hw->mac.ops.set_vmdq(hw, index, vmdq);
-
-	/*
-	 * HW expects these in little endian so we reverse the byte
-	 * order from network order (big endian) to little endian
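-	 * (e.g. the illustrative address 00:1B:21:3C:4D:5E gives
-	 * RAL = 0x3C211B00 and 0x5E4D in the low 16 bits of RAH)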
-	 */
-	rar_low = ((u32)addr[0] |
-		   ((u32)addr[1] << 8) |
-		   ((u32)addr[2] << 16) |
-		   ((u32)addr[3] << 24));
-	/*
-	 * Some parts put the VMDq setting in the extra RAH bits,
-	 * so save everything except the lower 16 bits that hold part
-	 * of the address and the address valid bit.
-	 */
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
-
-	if (enable_addr != 0)
-		rar_high |= IXGBE_RAH_AV;
-
-	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_clear_rar_generic - Remove Rx address register
- *  @hw: pointer to hardware structure
- *  @index: Receive address register to write
- *
- *  Clears an ethernet address from a receive address register.
- **/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
-{
-	u32 rar_high;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_clear_rar_generic");
-
-	/* Make sure we are using a valid rar index range */
-	if (index >= rar_entries) {
-		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
-			     "RAR index %d is out of range.\n", index);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	/*
-	 * Some parts put the VMDq setting in the extra RAH bits,
-	 * so save everything except the lower 16 bits that hold part
-	 * of the address and the address valid bit.
-	 */
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-
-	/* clear VMDq pool/queue selection for this RAR */
-	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
- *  @hw: pointer to hardware structure
- *
- *  Places the MAC address in receive address register 0 and clears the rest
- *  of the receive address registers. Clears the multicast table. Assumes
- *  the receiver is in reset when the routine is called.
- **/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
-{
-	u32 i;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
-
-	/*
-	 * If the current mac address is valid, assume it is a software override
-	 * to the permanent address.
-	 * Otherwise, use the permanent address from the eeprom.
-	 */
-	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
-	    IXGBE_ERR_INVALID_MAC_ADDR) {
-		/* Get the MAC address from the RAR0 for later reference */
-		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
-
-		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
-			  hw->mac.addr[0], hw->mac.addr[1],
-			  hw->mac.addr[2]);
-		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
-			  hw->mac.addr[4], hw->mac.addr[5]);
-	} else {
-		/* Setup the receive address. */
-		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
-		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
-			  hw->mac.addr[0], hw->mac.addr[1],
-			  hw->mac.addr[2]);
-		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
-			  hw->mac.addr[4], hw->mac.addr[5]);
-
-		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
-		/* clear VMDq pool/queue selection for RAR 0 */
-		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
-	}
-	hw->addr_ctrl.overflow_promisc = 0;
-
-	hw->addr_ctrl.rar_used_count = 1;
-
-	/* Zero out the other receive addresses. */
-	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
-	for (i = 1; i < rar_entries; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
-	}
-
-	/* Clear the MTA */
-	hw->addr_ctrl.mta_in_use = 0;
-	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
-
-	DEBUGOUT(" Clearing MTA\n");
-	for (i = 0; i < hw->mac.mcft_size; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
-
-	ixgbe_init_uta_tables(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_add_uc_addr - Adds a secondary unicast address.
- *  @hw: pointer to hardware structure
- *  @addr: new address
- *  @vmdq: VMDq "set" or "pool" index
- *
- *  Adds it to unused receive address register or goes into promiscuous mode.
- **/
-void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-	u32 rar_entries = hw->mac.num_rar_entries;
-	u32 rar;
-
-	DEBUGFUNC("ixgbe_add_uc_addr");
-
-	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
-		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
-	/*
-	 * Place this address in the RAR if there is room,
-	 * else put the controller into promiscuous mode
-	 */
-	if (hw->addr_ctrl.rar_used_count < rar_entries) {
-		rar = hw->addr_ctrl.rar_used_count;
-		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
-		hw->addr_ctrl.rar_used_count++;
-	} else {
-		hw->addr_ctrl.overflow_promisc++;
-	}
-
-	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- *  @hw: pointer to hardware structure
- *  @addr_list: the list of new addresses
- *  @addr_count: number of addresses
- *  @next: iterator function to walk the address list
- *
- *  The given list replaces any existing list.  Clears the secondary addrs from
- *  receive address registers.  Uses unused receive address registers for the
- *  first secondary addresses, and falls back to promiscuous mode as needed.
- *
- *  Drivers using secondary unicast addresses must set user_set_promisc when
- *  manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
-				      u32 addr_count, ixgbe_mc_addr_itr next)
-{
-	u8 *addr;
-	u32 i;
-	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
-	u32 uc_addr_in_use;
-	u32 fctrl;
-	u32 vmdq;
-
-	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
-
-	/*
-	 * Clear accounting of old secondary address list,
-	 * don't count RAR[0]
-	 */
-	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
-	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
-	hw->addr_ctrl.overflow_promisc = 0;
-
-	/* Zero out the other receive addresses */
-	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
-	for (i = 0; i < uc_addr_in_use; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
-	}
-
-	/* Add the new addresses */
-	for (i = 0; i < addr_count; i++) {
-		DEBUGOUT(" Adding the secondary addresses:\n");
-		addr = next(hw, &addr_list, &vmdq);
-		ixgbe_add_uc_addr(hw, addr, vmdq);
-	}
-
-	if (hw->addr_ctrl.overflow_promisc) {
-		/* enable promisc if not already in overflow or set by user */
-		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
-			DEBUGOUT(" Entering address overflow promisc mode\n");
-			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-			fctrl |= IXGBE_FCTRL_UPE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-		}
-	} else {
-		/* only disable if set by overflow, not by user */
-		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
-			DEBUGOUT(" Leaving address overflow promisc mode\n");
-			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-			fctrl &= ~IXGBE_FCTRL_UPE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-		}
-	}
-
-	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
- *  @hw: pointer to hardware structure
- *  @mc_addr: the multicast address
- *
- *  Extracts the 12 bits from a multicast address that determine which
- *  bit-vector to set in the multicast table. The hardware uses 12 bits from
- *  incoming rx multicast addresses to determine the bit-vector to check in
- *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
- *  by the MO field of the MCSTCTRL. The MO field is set during initialization
- *  to mc_filter_type.
- **/
-STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
-{
-	u32 vector = 0;
-
-	DEBUGFUNC("ixgbe_mta_vector");
-
-	switch (hw->mac.mc_filter_type) {
-	case 0:   /* use bits [47:36] of the address */
-		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
-		break;
-	case 1:   /* use bits [46:35] of the address */
-		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
-		break;
-	case 2:   /* use bits [45:34] of the address */
-		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
-		break;
-	case 3:   /* use bits [43:32] of the address */
-		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
-		break;
-	default:  /* Invalid mc_filter_type */
-		DEBUGOUT("MC filter type param set incorrectly\n");
-		ASSERT(0);
-		break;
-	}
-
-	/* vector can only be 12-bits or boundary will be exceeded */
-	vector &= 0xFFF;
-	return vector;
-}
-
-/**
- *  ixgbe_set_mta - Set bit-vector in multicast table
- *  @hw: pointer to hardware structure
- *  @mc_addr: multicast address for which to set the bit-vector
- *
- *  Sets the bit-vector in the multicast table.
- **/
-void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
-{
-	u32 vector;
-	u32 vector_bit;
-	u32 vector_reg;
-
-	DEBUGFUNC("ixgbe_set_mta");
-
-	hw->addr_ctrl.mta_in_use++;
-
-	vector = ixgbe_mta_vector(hw, mc_addr);
-	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
-
-	/*
-	 * The MTA is a register array of 128 32-bit registers. It is treated
-	 * like an array of 4096 bits.  We want to set bit
-	 * BitArray[vector_value]. So we figure out what register the bit is
-	 * in, read it, OR in the new bit, then write back the new value.  The
-	 * register is determined by the upper 7 bits of the vector value and
-	 * the bit within that register is determined by the lower 5 bits of
-	 * the value.
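-	 * For example, vector 0x5A3 selects MTA register 0x2D (0x5A3 >> 5)
-	 * and bit 3 (0x5A3 & 0x1F) within that register.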
-	 */
-	vector_reg = (vector >> 5) & 0x7F;
-	vector_bit = vector & 0x1F;
-	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
-}
-
-/**
- *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
- *  @hw: pointer to hardware structure
- *  @mc_addr_list: the list of new multicast addresses
- *  @mc_addr_count: number of addresses
- *  @next: iterator function to walk the multicast address list
- *  @clear: flag, when set clears the table beforehand
- *
- *  When the clear flag is set, the given list replaces any existing list.
- *  Hashes the given addresses into the multicast table.
- **/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
-				      bool clear)
-{
-	u32 i;
-	u32 vmdq;
-
-	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
-
-	/*
-	 * Set the new number of MC addresses that we are being requested to
-	 * use.
-	 */
-	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
-	hw->addr_ctrl.mta_in_use = 0;
-
-	/* Clear mta_shadow */
-	if (clear) {
-		DEBUGOUT(" Clearing MTA\n");
-		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
-	}
-
-	/* Update mta_shadow */
-	for (i = 0; i < mc_addr_count; i++) {
-		DEBUGOUT(" Adding the multicast addresses:\n");
-		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
-	}
-
-	/* Enable mta */
-	for (i = 0; i < hw->mac.mcft_size; i++)
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
-				      hw->mac.mta_shadow[i]);
-
-	if (hw->addr_ctrl.mta_in_use > 0)
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
-				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
-
-	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_enable_mc_generic - Enable multicast address in RAR
- *  @hw: pointer to hardware structure
- *
- *  Enables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
-
-	DEBUGFUNC("ixgbe_enable_mc_generic");
-
-	if (a->mta_in_use > 0)
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
-				hw->mac.mc_filter_type);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_disable_mc_generic - Disable multicast address in RAR
- *  @hw: pointer to hardware structure
- *
- *  Disables multicast address in RAR and the use of the multicast hash table.
- **/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
-
-	DEBUGFUNC("ixgbe_disable_mc_generic");
-
-	if (a->mta_in_use > 0)
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_fc_enable_generic - Enable flow control
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to the current settings.
- **/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 mflcn_reg, fccfg_reg;
-	u32 reg;
-	u32 fcrtl, fcrth;
-	int i;
-
-	DEBUGFUNC("ixgbe_fc_enable_generic");
-
-	/* Validate the water mark configuration */
-	if (!hw->fc.pause_time) {
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/* Low water mark of zero causes XOFF floods */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
-		    hw->fc.high_water[i]) {
-			if (!hw->fc.low_water[i] ||
-			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
-				DEBUGOUT("Invalid water mark configuration\n");
-				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-				goto out;
-			}
-		}
-	}
-
-	/* Negotiate the fc mode to use */
-	ixgbe_fc_autoneg(hw);
-
-	/* Disable any previous flow control settings */
-	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
-
-	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
-
-	/*
-	 * The possible values of fc.current_mode are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames,
-	 *    but not send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but
-	 *    we do not support receiving pause frames).
-	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-	 * other: Invalid.
-	 */
-	switch (hw->fc.current_mode) {
-	case ixgbe_fc_none:
-		/*
-		 * Flow control is disabled by software override or autoneg.
-		 * The code below will actually disable it in the HW.
-		 */
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		mflcn_reg |= IXGBE_MFLCN_RFCE;
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		mflcn_reg |= IXGBE_MFLCN_RFCE;
-		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
-		break;
-	default:
-		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
-			     "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-		break;
-	}
-
-	/* Set 802.3x based flow control settings. */
-	mflcn_reg |= IXGBE_MFLCN_DPF;
-	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
-	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
-
-
-	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
-		    hw->fc.high_water[i]) {
-			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
-			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
-			/*
-			 * In order to prevent Tx hangs when the internal Tx
-			 * switch is enabled we must set the high water mark
-			 * to the Rx packet buffer size - 24KB.  This allows
-			 * the Tx switch to function even under heavy Rx
-			 * workloads.
-			 */
-			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
-		}
-
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
-	}
-
-	/* Configure pause time (2 TCs per register) */
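-	/* Multiplying by 0x00010001 replicates the 16-bit pause time
-	 * into both halves of each FCTTV register.
-	 */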
-	reg = hw->fc.pause_time * 0x00010001;
-	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_negotiate_fc - Negotiate flow control
- *  @hw: pointer to hardware structure
- *  @adv_reg: flow control advertised settings
- *  @lp_reg: link partner's flow control settings
- *  @adv_sym: symmetric pause bit in advertisement
- *  @adv_asm: asymmetric pause bit in advertisement
- *  @lp_sym: symmetric pause bit in link partner advertisement
- *  @lp_asm: asymmetric pause bit in link partner advertisement
- *
- *  Find the intersection between advertised settings and link partner's
- *  advertised settings
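- *
- *  Resolution (matching the checks below): if both sides advertise
- *  symmetric pause the result is full, or rx_pause when only Rx pause
- *  was requested locally; a local asym-only advertisement with a remote
- *  sym+asym advertisement gives tx_pause; local sym+asym with remote
- *  asym-only gives rx_pause; any other combination gives none.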
- **/
-STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
-			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
-{
-	if ((!(adv_reg)) ||  (!(lp_reg))) {
-		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
-			     "Local or link partner's advertised flow control "
-			     "settings are NULL. Local: %x, link partner: %x\n",
-			     adv_reg, lp_reg);
-		return IXGBE_ERR_FC_NOT_NEGOTIATED;
-	}
-
-	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
-		/*
-		 * Now we need to check if the user selected reception
-		 * of pause frames only.  In this case, we had to advertise
-		 * FULL flow control because we could not advertise RX
-		 * ONLY. Hence, we must now check to see if we need to
-		 * turn OFF the TRANSMISSION of PAUSE frames.
-		 */
-		if (hw->fc.requested_mode == ixgbe_fc_full) {
-			hw->fc.current_mode = ixgbe_fc_full;
-			DEBUGOUT("Flow Control = FULL.\n");
-		} else {
-			hw->fc.current_mode = ixgbe_fc_rx_pause;
-			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
-		}
-	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-		hw->fc.current_mode = ixgbe_fc_tx_pause;
-		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
-	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
-		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
-		hw->fc.current_mode = ixgbe_fc_rx_pause;
-		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
-	} else {
-		hw->fc.current_mode = ixgbe_fc_none;
-		DEBUGOUT("Flow Control = NONE.\n");
-	}
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to the negotiated settings on 1 gig fiber.
- **/
-STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
-{
-	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-
-	/*
-	 * On multispeed fiber at 1g, bail out if
-	 * - link is up but AN did not complete, or if
-	 * - link is up and AN completed but timed out
-	 */
-
-	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
-		goto out;
-	}
-
-	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-
-	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
-				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
-				      IXGBE_PCS1GANA_ASM_PAUSE,
-				      IXGBE_PCS1GANA_SYM_PAUSE,
-				      IXGBE_PCS1GANA_ASM_PAUSE);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to IEEE clause 37.
- **/
-STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
-{
-	u32 links2, anlp1_reg, autoc_reg, links;
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-
-	/*
-	 * On backplane, bail out if
-	 * - backplane autoneg was not completed, or if
-	 * - we are 82599 and link partner is not AN enabled
-	 */
-	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-		DEBUGOUT("Auto-Negotiation did not complete\n");
-		goto out;
-	}
-
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-			DEBUGOUT("Link partner is not AN enabled\n");
-			goto out;
-		}
-	}
-	/*
-	 * Read the 10g AN autoc and LP ability registers and resolve
-	 * local flow control settings accordingly
-	 */
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
-	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
-		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
-		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to IEEE clause 37.
- **/
-STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
-{
-	u16 technology_ability_reg = 0;
-	u16 lp_technology_ability_reg = 0;
-
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-			     &technology_ability_reg);
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-			     &lp_technology_ability_reg);
-
-	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
-				  (u32)lp_technology_ability_reg,
-				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
-				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
-}
-
-/**
- *  ixgbe_fc_autoneg - Configure flow control
- *  @hw: pointer to hardware structure
- *
- *  Compares our advertised flow control capabilities to those advertised by
- *  our link partner, and determines the proper flow control mode to use.
- **/
-void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	DEBUGFUNC("ixgbe_fc_autoneg");
-
-	/*
-	 * AN should have completed when the cable was plugged in.
-	 * Look for reasons to bail out.  Bail out if:
-	 * - FC autoneg is disabled, or if
-	 * - link is not up.
-	 */
-	if (hw->fc.disable_fc_autoneg) {
-		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
-			     "Flow control autoneg is disabled");
-		goto out;
-	}
-
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (!link_up) {
-		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
-		goto out;
-	}
-
-	switch (hw->phy.media_type) {
-	/* Autoneg flow control on fiber adapters */
-	case ixgbe_media_type_fiber_qsfp:
-	case ixgbe_media_type_fiber:
-		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
-			ret_val = ixgbe_fc_autoneg_fiber(hw);
-		break;
-
-	/* Autoneg flow control on backplane adapters */
-	case ixgbe_media_type_backplane:
-		ret_val = ixgbe_fc_autoneg_backplane(hw);
-		break;
-
-	/* Autoneg flow control on copper adapters */
-	case ixgbe_media_type_copper:
-		if (ixgbe_device_supports_autoneg_fc(hw))
-			ret_val = ixgbe_fc_autoneg_copper(hw);
-		break;
-
-	default:
-		break;
-	}
-
-out:
-	if (ret_val == IXGBE_SUCCESS) {
-		hw->fc.fc_was_autonegged = true;
-	} else {
-		hw->fc.fc_was_autonegged = false;
-		hw->fc.current_mode = hw->fc.requested_mode;
-	}
-}
-
-/*
- * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
- * @hw: pointer to hardware structure
- *
- * System-wide timeout range is encoded in PCIe Device Control2 register.
- *
- * Add 10% to specified maximum and return the number of times to poll for
- * completion timeout, in units of 100 microsec.  Never return less than
- * 800 = 80 millisec.
- */
-STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
-{
-	s16 devctl2;
-	u32 pollcnt;
-
-	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
-	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
-
-	switch (devctl2) {
-	case IXGBE_PCIDEVCTRL2_65_130ms:
-		pollcnt = 1300;		/* 130 millisec */
-		break;
-	case IXGBE_PCIDEVCTRL2_260_520ms:
-		pollcnt = 5200;		/* 520 millisec */
-		break;
-	case IXGBE_PCIDEVCTRL2_1_2s:
-		pollcnt = 20000;	/* 2 sec */
-		break;
-	case IXGBE_PCIDEVCTRL2_4_8s:
-		pollcnt = 80000;	/* 8 sec */
-		break;
-	case IXGBE_PCIDEVCTRL2_17_34s:
-		pollcnt = 34000;	/* 34 sec */
-		break;
-	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
-	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
-	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
-	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
-	default:
-		pollcnt = 800;		/* 80 millisec minimum */
-		break;
-	}
-
-	/* add 10% to spec maximum */
-	return (pollcnt * 11) / 10;
-}
-
-/**
- *  ixgbe_disable_pcie_master - Disable PCI-express master access
- *  @hw: pointer to hardware structure
- *
- *  Disables PCI-Express master access and verifies there are no pending
- *  requests.  IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if setting the
- *  master disable bit did not cause outstanding master requests to clear,
- *  otherwise IXGBE_SUCCESS is returned, signifying master requests disabled.
- **/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 i, poll;
-	u16 value;
-
-	DEBUGFUNC("ixgbe_disable_pcie_master");
-
-	/* Always set this bit to ensure any future transactions are blocked */
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
-
-	/* Exit if master requests are blocked */
-	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
-	    IXGBE_REMOVED(hw->hw_addr))
-		goto out;
-
-	/* Poll for master request bit to clear */
-	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-		usec_delay(100);
-		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
-			goto out;
-	}
-
-	/*
-	 * Two consecutive resets are required via CTRL.RST per datasheet
-	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
-	 * of this need.  The first reset prevents new master requests from
-	 * being issued by our device.  We then must wait 1usec or more for any
-	 * remaining completions from the PCIe bus to trickle in, and then reset
-	 * again to clear out any effects they may have had on our device.
-	 */
-	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
-	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-
-	/*
-	 * Before proceeding, make sure that the PCIe block does not have
-	 * transactions pending.
-	 */
-	poll = ixgbe_pcie_timeout_poll(hw);
-	for (i = 0; i < poll; i++) {
-		usec_delay(100);
-		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
-		if (IXGBE_REMOVED(hw->hw_addr))
-			goto out;
-		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
-			goto out;
-	}
-
-	ERROR_REPORT1(IXGBE_ERROR_POLLING,
-		     "PCIe transaction pending bit also did not clear.\n");
-	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to acquire
- *
- *  Acquires the SWFW semaphore through the GSSR register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
-{
-	u32 gssr = 0;
-	u32 swmask = mask;
-	u32 fwmask = mask << 5;
-	u32 timeout = 200;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_acquire_swfw_sync");
-
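-	/* In the GSSR register the firmware's counterpart of each software
-	 * ownership bit sits 5 positions higher, hence fwmask = mask << 5.
-	 */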
-	for (i = 0; i < timeout; i++) {
-		/*
-		 * SW NVM semaphore bit is used for access to all
-		 * SW_FW_SYNC bits (not just NVM)
-		 */
-		if (ixgbe_get_eeprom_semaphore(hw))
-			return IXGBE_ERR_SWFW_SYNC;
-
-		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-		if (!(gssr & (fwmask | swmask))) {
-			gssr |= swmask;
-			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
-			ixgbe_release_eeprom_semaphore(hw);
-			return IXGBE_SUCCESS;
-		} else {
-			/* Resource is currently in use by FW or SW */
-			ixgbe_release_eeprom_semaphore(hw);
-			msec_delay(5);
-		}
-	}
-
-	/* If time expired clear the bits holding the lock and retry */
-	if (gssr & (fwmask | swmask))
-		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
-
-	msec_delay(5);
-	return IXGBE_ERR_SWFW_SYNC;
-}
-
-/**
- *  ixgbe_release_swfw_sync - Release SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to release
- *
- *  Releases the SWFW semaphore through the GSSR register for the specified
- *  function (CSR, PHY0, PHY1, EEPROM, Flash)
- **/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
-{
-	u32 gssr;
-	u32 swmask = mask;
-
-	DEBUGFUNC("ixgbe_release_swfw_sync");
-
-	ixgbe_get_eeprom_semaphore(hw);
-
-	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-	gssr &= ~swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
-
-	ixgbe_release_eeprom_semaphore(hw);
-}
-
-/**
- *  ixgbe_disable_sec_rx_path_generic - Stops the receive data path
- *  @hw: pointer to hardware structure
- *
- *  Stops the receive data path and waits for the HW to internally empty
- *  the Rx security block
- **/
-s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
-{
-#define IXGBE_MAX_SECRX_POLL 40
-
-	int i;
-	int secrxreg;
-
-	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
-
-	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
-		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
-		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
-			break;
-		else
-			/* Use interrupt-safe sleep just in case */
-			usec_delay(1000);
-	}
-
-	/* For informational purposes only */
-	if (i >= IXGBE_MAX_SECRX_POLL)
-		DEBUGOUT("Rx unit being enabled before security "
-			 "path fully disabled.  Continuing with init.\n");
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
- *  @hw: pointer to hardware structure
- *  @locked: set to false; no SW/FW semaphore is needed for this read
- *  @reg_val: value read from AUTOC
- *
- *  The default case requires no protection so just do the register read.
- */
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
-{
-	*locked = false;
-	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	return IXGBE_SUCCESS;
-}
-
-/**
- * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
- * @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
- * @locked: bool to indicate whether the SW/FW lock was already taken by
- *           previous read.
- *
- * The default case requires no protection so just do the register write.
- */
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
-{
-	UNREFERENCED_1PARAMETER(locked);
-
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_enable_sec_rx_path_generic - Enables the receive data path
- *  @hw: pointer to hardware structure
- *
- *  Enables the receive data path.
- **/
-s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
-{
-	int secrxreg;
-
-	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
-
-	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
-	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
- *  @hw: pointer to hardware structure
- *  @regval: register value to write to RXCTRL
- *
- *  Enables the Rx DMA unit
- **/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
-{
-	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
-
-	if (regval & IXGBE_RXCTRL_RXEN)
-		ixgbe_enable_rx(hw);
-	else
-		ixgbe_disable_rx(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_blink_led_start_generic - Blink LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to blink
- **/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
-{
-	ixgbe_link_speed speed = 0;
-	bool link_up = 0;
-	u32 autoc_reg = 0;
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	s32 ret_val = IXGBE_SUCCESS;
-	bool locked = false;
-
-	DEBUGFUNC("ixgbe_blink_led_start_generic");
-
-	/*
-	 * Link must be up to auto-blink the LEDs;
-	 * Force it if link is down.
-	 */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-	if (!link_up) {
-		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
-		if (ret_val != IXGBE_SUCCESS)
-			goto out;
-
-		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-		autoc_reg |= IXGBE_AUTOC_FLU;
-
-		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
-		if (ret_val != IXGBE_SUCCESS)
-			goto out;
-
-		IXGBE_WRITE_FLUSH(hw);
-		msec_delay(10);
-	}
-
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg |= IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to stop blinking
- **/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
-{
-	u32 autoc_reg = 0;
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	s32 ret_val = IXGBE_SUCCESS;
-	bool locked = false;
-
-	DEBUGFUNC("ixgbe_blink_led_stop_generic");
-
-	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	autoc_reg &= ~IXGBE_AUTOC_FLU;
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-
-	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg &= ~IXGBE_LED_BLINK(index);
-	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
- *  @hw: pointer to hardware structure
- *  @san_mac_offset: SAN MAC address offset
- *
- *  This function will read the EEPROM location for the SAN MAC address
- *  pointer and return the value at that location.  This is used in both
- *  get and set mac_addr routines.
- **/
-STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
-					 u16 *san_mac_offset)
-{
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
-
-	/*
-	 * First read the EEPROM pointer to see if the MAC addresses are
-	 * available.
-	 */
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
-				      san_mac_offset);
-	if (ret_val) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom at offset %d failed",
-			      IXGBE_SAN_MAC_ADDR_PTR);
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
- *  per-port, so set_lan_id() must be called before reading the addresses.
- *  set_lan_id() is called by identify_sfp(), but this cannot be relied
- *  upon for non-SFP connections, so we must call it here.
- **/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-	u16 san_mac_data, san_mac_offset;
-	u8 i;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
-
-	/*
-	 * First read the EEPROM pointer to see if the MAC addresses are
-	 * available.  If they're not, no point in calling set_lan_id() here.
-	 */
-	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
-		goto san_mac_addr_out;
-
-	/* make sure we know which port we need to program */
-	hw->mac.ops.set_lan_id(hw);
-	/* apply the port offset to the address offset */
-	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-	for (i = 0; i < 3; i++) {
-		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
-					      &san_mac_data);
-		if (ret_val) {
-			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-				      "eeprom read at offset %d failed",
-				      san_mac_offset);
-			goto san_mac_addr_out;
-		}
-		san_mac_addr[i * 2] = (u8)(san_mac_data);
-		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
-		san_mac_offset++;
-	}
-	return IXGBE_SUCCESS;
-
-san_mac_addr_out:
-	/*
-	 * No addresses available in this EEPROM.  It's not an
-	 * error though, so just wipe the local address and return.
-	 */
-	for (i = 0; i < 6; i++)
-		san_mac_addr[i] = 0xFF;
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Write a SAN MAC address to the EEPROM.
- **/
-s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-	s32 ret_val;
-	u16 san_mac_data, san_mac_offset;
-	u8 i;
-
-	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
-
-	/* Look for SAN mac address pointer.  If not defined, return */
-	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
-		return IXGBE_ERR_NO_SAN_ADDR_PTR;
-
-	/* Make sure we know which port we need to write */
-	hw->mac.ops.set_lan_id(hw);
-	/* Apply the port offset to the address offset */
-	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-
-	for (i = 0; i < 3; i++) {
-		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
-		san_mac_data |= (u16)(san_mac_addr[i * 2]);
-		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
-		san_mac_offset++;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
-{
-	u16 msix_count = 1;
-	u16 max_msix_count;
-	u16 pcie_offset;
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
-		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
-		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
-		break;
-	default:
-		return msix_count;
-	}
-
-	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
-	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
-	if (IXGBE_REMOVED(hw->hw_addr))
-		msix_count = 0;
-	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-	/* MSI-X count is zero-based in HW */
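-	/* e.g. a table size field of 63 means 64 vectors are supported */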
-	msix_count++;
-
-	if (msix_count > max_msix_count)
-		msix_count = max_msix_count;
-
-	return msix_count;
-}
-
-/**
- *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
- *  @hw: pointer to hardware structure
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq pool to assign
- *
- *  Puts an ethernet address into a receive address register, or
- *  finds the RAR that it is already in; adds to the pool list
- **/
-s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
-	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
-	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
-	u32 rar;
-	u32 rar_low, rar_high;
-	u32 addr_low, addr_high;
-
-	DEBUGFUNC("ixgbe_insert_mac_addr_generic");
-
-	/* swap bytes for HW little endian */
-	addr_low  = addr[0] | (addr[1] << 8)
-			    | (addr[2] << 16)
-			    | (addr[3] << 24);
-	addr_high = addr[4] | (addr[5] << 8);
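-	/* e.g. 00:1B:21:AA:BB:CC gives addr_low 0xAA211B00, addr_high 0xCCBB */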
-
-	/*
-	 * Either find the mac_id in rar or find the first empty space.
-	 * rar_highwater points to just after the highest currently used
-	 * rar in order to shorten the search.  It grows when we add a new
-	 * rar to the top.
-	 */
-	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
-		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
-
-		if (((IXGBE_RAH_AV & rar_high) == 0)
-		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
-			first_empty_rar = rar;
-		} else if ((rar_high & 0xFFFF) == addr_high) {
-			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
-			if (rar_low == addr_low)
-				break;    /* found it already in the rars */
-		}
-	}
-
-	if (rar < hw->mac.rar_highwater) {
-		/* already there so just add to the pool bits */
-		ixgbe_set_vmdq(hw, rar, vmdq);
-	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
-		/* stick it into first empty RAR slot we found */
-		rar = first_empty_rar;
-		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-	} else if (rar == hw->mac.rar_highwater) {
-		/* add it to the top of the list and inc the highwater mark */
-		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
-		hw->mac.rar_highwater++;
-	} else if (rar >= hw->mac.num_rar_entries) {
-		return IXGBE_ERR_INVALID_MAC_ADDR;
-	}
-
-	/*
-	 * If we found rar[0], make sure the default pool bit (we use pool 0)
-	 * remains cleared to be sure default pool packets will get delivered
-	 */
-	if (rar == 0)
-		ixgbe_clear_vmdq(hw, rar, 0);
-
-	return rar;
-}
-
-/**
- *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to disassociate
- *  @vmdq: VMDq pool index to remove from the rar
- **/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 mpsar_lo, mpsar_hi;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_clear_vmdq_generic");
-
-	/* Make sure we are using a valid rar index range */
-	if (rar >= rar_entries) {
-		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
-			     "RAR index %d is out of range.\n", rar);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
-	if (IXGBE_REMOVED(hw->hw_addr))
-		goto done;
-
-	if (!mpsar_lo && !mpsar_hi)
-		goto done;
-
-	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-		if (mpsar_lo) {
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-			mpsar_lo = 0;
-		}
-		if (mpsar_hi) {
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-			mpsar_hi = 0;
-		}
-	} else if (vmdq < 32) {
-		mpsar_lo &= ~(1 << vmdq);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-	} else {
-		mpsar_hi &= ~(1 << (vmdq - 32));
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-	}
-
-	/* was that the last pool using this rar? */
-	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-		hw->mac.ops.clear_rar(hw, rar);
-done:
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to associate with a VMDq index
- *  @vmdq: VMDq pool index
- **/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 mpsar;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	DEBUGFUNC("ixgbe_set_vmdq_generic");
-
-	/* Make sure we are using a valid rar index range */
-	if (rar >= rar_entries) {
-		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
-			     "RAR index %d is out of range.\n", rar);
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
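-	/* Pools 0-31 map to MPSAR_LO and pools 32-63 to MPSAR_HI,
-	 * e.g. pool 40 sets bit 8 of MPSAR_HI.
-	 */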
-	if (vmdq < 32) {
-		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar |= 1 << vmdq;
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-	} else {
-		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-		mpsar |= 1 << (vmdq - 32);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-	}
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
- *  @hw: pointer to hardware struct
- *  @vmdq: VMDq pool index
- *
- *  This function should only be invoked in IOV mode.  In IOV mode the
- *  default pool is the pool following the number of VFs advertised, not
- *  pool 0.  The MPSAR table needs to be updated for the SAN_MAC RAR
- *  [hw->mac.san_mac_rar_index].
- **/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
-{
-	u32 rar = hw->mac.san_mac_rar_index;
-
-	DEBUGFUNC("ixgbe_set_vmdq_san_mac");
-
-	if (vmdq < 32) {
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-	} else {
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
-{
-	int i;
-
-	DEBUGFUNC("ixgbe_init_uta_tables_generic");
-	DEBUGOUT(" Clearing UTA\n");
-
-	for (i = 0; i < 128; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *
- *  Return the VLVF index where this VLAN id should be placed.
- **/
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
-{
-	u32 bits = 0;
-	u32 first_empty_slot = 0;
-	s32 regindex;
-
-	/* short cut the special case */
-	if (vlan == 0)
-		return 0;
-
-	/*
-	 * Search for the vlan id in the VLVF entries. Save off the first
-	 * empty slot found along the way.
-	 */
-	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
-		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-		if (!bits && !(first_empty_slot))
-			first_empty_slot = regindex;
-		else if ((bits & 0x0FFF) == vlan)
-			break;
-	}
-
-	/*
-	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
-	 * in the VLVF. Else use the first empty VLVF register for this
-	 * vlan id.
-	 */
-	if (regindex >= IXGBE_VLVF_ENTRIES) {
-		if (first_empty_slot)
-			regindex = first_empty_slot;
-		else {
-			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
-				     "No space in VLVF.\n");
-			regindex = IXGBE_ERR_NO_SPACE;
-		}
-	}
-
-	return regindex;
-}
-
-/**
- *  ixgbe_set_vfta_generic - Set VLAN filter table
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
- *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
- *
- *  Turn on/off specified VLAN in the VLAN filter table.
- **/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-			   bool vlan_on)
-{
-	s32 regindex;
-	u32 bitindex;
-	u32 vfta;
-	u32 targetbit;
-	s32 ret_val = IXGBE_SUCCESS;
-	bool vfta_changed = false;
-
-	DEBUGFUNC("ixgbe_set_vfta_generic");
-
-	if (vlan > 4095)
-		return IXGBE_ERR_PARAM;
-
-	/*
-	 * this is a 2 part operation - first the VFTA, then the
-	 * VLVF and VLVFB if VT Mode is set
-	 * We don't write the VFTA until we know the VLVF part succeeded.
-	 */
-
-	/* Part 1
-	 * The VFTA is a bitstring made up of 128 32-bit registers
-	 * that enable the particular VLAN id, much like the MTA:
-	 *    bits[11-5]: which register
-	 *    bits[4-0]:  which bit in the register
-	 */
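-	/* Example: VLAN id 100 -> VFTA[3], bit 4 (100 >> 5 == 3, 100 & 0x1F == 4) */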
-	regindex = (vlan >> 5) & 0x7F;
-	bitindex = vlan & 0x1F;
-	targetbit = (1 << bitindex);
-	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
-	if (vlan_on) {
-		if (!(vfta & targetbit)) {
-			vfta |= targetbit;
-			vfta_changed = true;
-		}
-	} else {
-		if ((vfta & targetbit)) {
-			vfta &= ~targetbit;
-			vfta_changed = true;
-		}
-	}
-
-	/* Part 2
-	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
-	 */
-	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
-					 &vfta_changed);
-	if (ret_val != IXGBE_SUCCESS)
-		return ret_val;
-
-	if (vfta_changed)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
- *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
- *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
- *                 should be changed
- *
- *  Turn on/off specified bit in VLVF table.
- **/
-s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-			    bool vlan_on, bool *vfta_changed)
-{
-	u32 vt;
-
-	DEBUGFUNC("ixgbe_set_vlvf_generic");
-
-	if (vlan > 4095)
-		return IXGBE_ERR_PARAM;
-
-	/* If VT Mode is set
-	 *   Either vlan_on
-	 *     make sure the vlan is in VLVF
-	 *     set the vind bit in the matching VLVFB
-	 *   Or !vlan_on
-	 *     clear the pool bit and possibly the vind
-	 */
-	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
-		s32 vlvf_index;
-		u32 bits;
-
-		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
-		if (vlvf_index < 0)
-			return vlvf_index;
-
-		if (vlan_on) {
-			/* set the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(vlvf_index * 2));
-				bits |= (1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB(vlvf_index * 2),
-						bits);
-			} else {
-				bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-				bits |= (1 << (vind - 32));
-				IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1),
-					bits);
-			}
-		} else {
-			/* clear the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(vlvf_index * 2));
-				bits &= ~(1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB(vlvf_index * 2),
-						bits);
-				bits |= IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-			} else {
-				bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-				bits &= ~(1 << (vind - 32));
-				IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1),
-					bits);
-				bits |= IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(vlvf_index * 2));
-			}
-		}
-
-		/*
-		 * If there are still bits set in the VLVFB registers
-		 * for the VLAN ID indicated we need to see if the
-		 * caller is requesting that we clear the VFTA entry bit.
-		 * If the caller has requested that we clear the VFTA
-		 * entry bit but there are still pools/VFs using this VLAN
-		 * ID entry then ignore the request.  We're not worried
-		 * about the case where we're turning the VFTA VLAN ID
-		 * entry bit on, only when requested to turn it off as
-		 * there may be multiple pools and/or VFs using the
-		 * VLAN ID entry.  In that case we cannot clear the
-		 * VFTA bit until all pools/VFs using that VLAN ID have also
-		 * been cleared.  This will be indicated by "bits" being
-		 * zero.
-		 */
-		if (bits) {
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
-					(IXGBE_VLVF_VIEN | vlan));
-			if ((!vlan_on) && (vfta_changed != NULL)) {
-				/* someone wants to clear the vfta entry
-				 * but some pools/VFs are still using it.
-				 * Ignore it. */
-				*vfta_changed = false;
-			}
-		} else
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_clear_vfta_generic - Clear VLAN filter table
- *  @hw: pointer to hardware structure
- *
- *  Clears the VLAN filter table, and the VMDq index associated with the filter
- **/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
-{
-	u32 offset;
-
-	DEBUGFUNC("ixgbe_clear_vfta_generic");
-
-	for (offset = 0; offset < hw->mac.vft_size; offset++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
-	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_check_mac_link_generic - Determine link and speed status
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @link_up: true when link is up
- *  @link_up_wait_to_complete: bool used to wait for link up or not
- *
- *  Reads the links register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-				 bool *link_up, bool link_up_wait_to_complete)
-{
-	u32 links_reg, links_orig;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_check_mac_link_generic");
-
-	/* clear the old state */
-	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
-	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-
-	if (links_orig != links_reg) {
-		DEBUGOUT2("LINKS changed from %08X to %08X\n",
-			  links_orig, links_reg);
-	}
-
-	if (link_up_wait_to_complete) {
-		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
-			if (links_reg & IXGBE_LINKS_UP) {
-				*link_up = true;
-				break;
-			} else {
-				*link_up = false;
-			}
-			msec_delay(100);
-			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-		}
-	} else {
-		if (links_reg & IXGBE_LINKS_UP)
-			*link_up = true;
-		else
-			*link_up = false;
-	}
-
-	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
-	case IXGBE_LINKS_SPEED_10G_82599:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.type >= ixgbe_mac_X550) {
-			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
-				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
-		}
-		break;
-	case IXGBE_LINKS_SPEED_1G_82599:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	case IXGBE_LINKS_SPEED_100_82599:
-		*speed = IXGBE_LINK_SPEED_100_FULL;
-		if (hw->mac.type >= ixgbe_mac_X550) {
-			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
-				*speed = IXGBE_LINK_SPEED_5GB_FULL;
-		}
-		break;
-	default:
-		*speed = IXGBE_LINK_SPEED_UNKNOWN;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
- *  the EEPROM
- *  @hw: pointer to hardware structure
- *  @wwnn_prefix: the alternative WWNN prefix
- *  @wwpn_prefix: the alternative WWPN prefix
- *
- *  This function will read the alternative SAN MAC address block in the
- *  EEPROM to check for support of the alternative WWNN/WWPN prefixes.
- **/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-				 u16 *wwpn_prefix)
-{
-	u16 offset, caps;
-	u16 alt_san_mac_blk_offset;
-
-	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
-
-	/* clear output first */
-	*wwnn_prefix = 0xFFFF;
-	*wwpn_prefix = 0xFFFF;
-
-	/* check if alternative SAN MAC is supported */
-	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
-	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
-		goto wwn_prefix_err;
-
-	if ((alt_san_mac_blk_offset == 0) ||
-	    (alt_san_mac_blk_offset == 0xFFFF))
-		goto wwn_prefix_out;
-
-	/* check capability in alternative san mac address block */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-	if (hw->eeprom.ops.read(hw, offset, &caps))
-		goto wwn_prefix_err;
-	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-		goto wwn_prefix_out;
-
-	/* get the corresponding prefix for WWNN/WWPN */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom read at offset %d failed", offset);
-	}
-
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
-		goto wwn_prefix_err;
-
-wwn_prefix_out:
-	return IXGBE_SUCCESS;
-
-wwn_prefix_err:
-	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-		      "eeprom read at offset %d failed", offset);
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
- *  @hw: pointer to hardware structure
- *  @bs: the fcoe boot status
- *
- *  This function will read the FCOE boot status from the iSCSI FCOE block
- **/
-s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
-{
-	u16 offset, caps, flags;
-	s32 status;
-
-	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
-
-	/* clear output first */
-	*bs = ixgbe_fcoe_bootstatus_unavailable;
-
-	/* check if FCOE IBA block is present */
-	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
-	status = hw->eeprom.ops.read(hw, offset, &caps);
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
-		goto out;
-
-	/* check if iSCSI FCOE block is populated */
-	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	if ((offset == 0) || (offset == 0xFFFF))
-		goto out;
-
-	/* read fcoe flags in iSCSI FCOE block */
-	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
-	status = hw->eeprom.ops.read(hw, offset, &flags);
-	if (status != IXGBE_SUCCESS)
-		goto out;
-
-	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
-		*bs = ixgbe_fcoe_bootstatus_enabled;
-	else
-		*bs = ixgbe_fcoe_bootstatus_disabled;
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
- *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for anti-spoofing
- *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
- *
- **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
-{
-	int j;
-	int pf_target_reg = pf >> 3;
-	int pf_target_shift = pf % 8;
-	u32 pfvfspoof = 0;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return;
-
-	if (enable)
-		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
-	/*
-	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
-	 * MAC anti-spoof enables in each register array element.
-	 */
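-	/* e.g. pf == 10 with enable: PFVFSPOOF(0) covers pools 0-7,
-	 * PFVFSPOOF(1) keeps only the bits for pools 8-9, the rest are 0.
-	 */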
-	for (j = 0; j < pf_target_reg; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * The PF should be allowed to spoof so that it can support
-	 * emulation mode NICs.  Do not set the bits assigned to the PF
-	 */
-	pfvfspoof &= (1 << pf_target_shift) - 1;
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * Remaining pools belong to the PF so they do not need to have
-	 * anti-spoofing enabled.
-	 */
-	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
-}
-
-/**
- *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
- *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for VLAN anti-spoofing
- *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
- *
- **/
-void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
-{
-	int vf_target_reg = vf >> 3;
-	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
-	u32 pfvfspoof;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return;
-
-	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
-	else
-		pfvfspoof &= ~(1 << vf_target_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
-}
-
-/**
- *  ixgbe_get_device_caps_generic - Get additional device capabilities
- *  @hw: pointer to hardware structure
- *  @device_caps: the EEPROM word with the extra device capabilities
- *
- *  This function will read the EEPROM location for the device capabilities,
- *  and return the word through device_caps.
- **/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
-{
-	DEBUGFUNC("ixgbe_get_device_caps_generic");
-
-	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
- *  @hw: pointer to hardware structure
- *
- **/
-void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
-{
-	u32 regval;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
-
-	/* Enable relaxed ordering */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-	}
-
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-
-}
-
-/**
- *  ixgbe_calculate_checksum - Calculate checksum for buffer
- *  @buffer: pointer to EEPROM
- *  @length: size of EEPROM to calculate a checksum for
- *
- *  Calculates the checksum of the given buffer over the specified length
- *  and returns it.
- **/
-u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
-{
-	u32 i;
-	u8 sum = 0;
-
-	DEBUGFUNC("ixgbe_calculate_checksum");
-
-	if (!buffer)
-		return 0;
-
-	for (i = 0; i < length; i++)
-		sum += buffer[i];
-
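-	/* Two's complement: adding this value back to the byte sum
-	 * gives zero (mod 256).
-	 */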
-	return (u8) (0 - sum);
-}
-
-/**
- *  ixgbe_get_hi_status - Get host interface command status
- *  @hw: pointer to the HW structure
- *  @ret_status: optional pointer used to return the raw status code
- *
- *  Check whether the command completed successfully.  Returns IXGBE_SUCCESS
- *  on success, otherwise IXGBE_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 ixgbe_get_hi_status(struct ixgbe_hw *hw, u8 *ret_status)
-{
-	struct ixgbe_hic_hdr response;
-	u32 *response_val = (u32 *)&response;
-
-	DEBUGFUNC("ixgbe_get_host_interface_status");
-
-	/* Read the command response */
-	*response_val = IXGBE_CPU_TO_LE32(IXGBE_READ_REG(hw, IXGBE_FLEX_MNG));
-
-	if (ret_status)
-		*ret_status = response.cmd_or_resp.ret_status;
-
-	if (response.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS) {
-		DEBUGOUT1("Host interface error=%x.\n",
-			  response.cmd_or_resp.ret_status);
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_host_interface_command - Issue command to manageability block
- *  @hw: pointer to the HW structure
- *  @buffer: contains the command to write and where the return status will
- *   be placed
- *  @length: length of buffer, must be multiple of 4 bytes
- *  @timeout: time in ms to wait for command completion
- *  @return_data: read and return data from the buffer (true) or not (false)
- *   Needed because FW structures are big endian and decoding these
- *   fields can be 8 bit or 16 bit based on the command; rather than
- *   build a table of commands, decoding is left to the caller in
- *   those cases.
- *
- *  Communicates with the manageability block.  Returns IXGBE_SUCCESS on
- *  success, otherwise IXGBE_ERR_HOST_INTERFACE_COMMAND.
- **/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-				 u32 length, u32 timeout, bool return_data)
-{
-	u32 hicr, i, bi, fwsts;
-	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
-	u16 buf_len;
-	u16 dword_len;
-
-	DEBUGFUNC("ixgbe_host_interface_command");
-
-	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
-		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
-	}
-	/* Set bit 9 of FWSTS clearing FW reset indication */
-	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
-	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
-
-	/* Check that the host interface is enabled. */
-	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
-	if ((hicr & IXGBE_HICR_EN) == 0) {
-		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs. We must be DWORD aligned */
-	if ((length % (sizeof(u32))) != 0) {
-		DEBUGOUT("Buffer length failure, not aligned to dword");
-		return IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	dword_len = length >> 2;
-
-	/* The device driver writes the relevant command block
-	 * into the ram area.
-	 */
-	for (i = 0; i < dword_len; i++)
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-				      i, IXGBE_CPU_TO_LE32(buffer[i]));
-
-	/* Setting this bit tells the ARC that a new command is pending. */
-	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
-
-	for (i = 0; i < timeout; i++) {
-		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
-		if (!(hicr & IXGBE_HICR_C))
-			break;
-		msec_delay(1);
-	}
-
-	/* Check command completion */
-	if ((timeout != 0 && i == timeout) ||
-	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
-		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
-			     "Command has failed with no status valid.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	if (!return_data)
-		return 0;
-
-	/* Calculate length in DWORDs */
-	dword_len = hdr_size >> 2;
-
-	/* first pull in the header so we know the buffer length */
-	for (bi = 0; bi < dword_len; bi++) {
-		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		IXGBE_LE32_TO_CPUS(&buffer[bi]);
-	}
-
-	/* If there is anything in the data position, pull it in */
-	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
-	if (buf_len == 0)
-		return 0;
-
-	if (length < buf_len + hdr_size) {
-		DEBUGOUT("Buffer not large enough for reply message.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
-	}
-
-	/* Calculate length in DWORDs, add 3 for odd lengths */
-	dword_len = (buf_len + 3) >> 2;
-
-	/* Pull in the rest of the buffer (bi is where we left off) */
-	for (; bi <= dword_len; bi++) {
-		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		IXGBE_LE32_TO_CPUS(&buffer[bi]);
-	}
-
-	return 0;
-}
-
-/**
- *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
- *  @hw: pointer to the HW structure
- *  @maj: driver version major number
- *  @min: driver version minor number
- *  @build: driver version build number
- *  @sub: driver version sub build number
- *
- *  Sends driver version number to firmware through the manageability
- *  block.  Returns IXGBE_SUCCESS on success, IXGBE_ERR_SWFW_SYNC when an
- *  error is encountered acquiring the semaphore, or
- *  IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
- **/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
-				 u8 build, u8 sub)
-{
-	struct ixgbe_hic_drv_info fw_cmd;
-	int i;
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
-	    != IXGBE_SUCCESS) {
-		ret_val = IXGBE_ERR_SWFW_SYNC;
-		goto out;
-	}
-
-	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
-	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
-	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
-	fw_cmd.port_num = (u8)hw->bus.func;
-	fw_cmd.ver_maj = maj;
-	fw_cmd.ver_min = min;
-	fw_cmd.ver_build = build;
-	fw_cmd.ver_sub = sub;
-	fw_cmd.hdr.checksum = 0;
-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
-	fw_cmd.pad = 0;
-	fw_cmd.pad2 = 0;
-
-	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
-		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
-						       sizeof(fw_cmd),
-						       IXGBE_HI_COMMAND_TIMEOUT,
-						       true);
-		if (ret_val != IXGBE_SUCCESS)
-			continue;
-
-		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
-		    FW_CEM_RESP_STATUS_SUCCESS)
-			ret_val = IXGBE_SUCCESS;
-		else
-			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-
-		break;
-	}
-
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
-	return ret_val;
-}
-
-/**
- * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
- * @hw: pointer to hardware structure
- * @num_pb: number of packet buffers to allocate
- * @headroom: reserve n KB of headroom
- * @strategy: packet buffer allocation strategy
- **/
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
-			     int strategy)
-{
-	u32 pbsize = hw->mac.rx_pb_size;
-	int i = 0;
-	u32 rxpktsize, txpktsize, txpbthresh;
-
-	/* Reserve headroom */
-	pbsize -= headroom;
-
-	if (!num_pb)
-		num_pb = 1;
-
-	/* Divide remaining packet buffer space amongst the number of packet
-	 * buffers requested using supplied strategy.
-	 */
-	switch (strategy) {
-	case PBA_STRATEGY_WEIGHTED:
-		/* The ixgbe_dcb_pba_80_48 strategy weights the first half of
-		 * the packet buffers with 5/8 of the packet buffer space.
-		 */
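-		/* e.g. num_pb == 8: TCs 0-3 each get pbsize * 5/32, 5/8 in total */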
-		rxpktsize = (pbsize * 5) / (num_pb * 4);
-		pbsize -= rxpktsize * (num_pb / 2);
-		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
-		for (; i < (num_pb / 2); i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-		/* Fall through to configure remaining packet buffers */
-	case PBA_STRATEGY_EQUAL:
-		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
-		for (; i < num_pb; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-		break;
-	default:
-		break;
-	}
-
-	/* Only support an equally distributed Tx packet buffer strategy. */
-	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
-	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
-	for (i = 0; i < num_pb; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
-	}
-
-	/* Clear unused TCs, if any, to zero buffer size*/
-	for (; i < IXGBE_MAX_PB; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
-	}
-}
-
-/**
- * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
- * @hw: pointer to the hardware structure
- *
- * The 82599 and x540 MACs can experience issues if TX work is still pending
- * when a reset occurs.  This function prevents this by flushing the PCIe
- * buffers on the system.
- **/
-void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
-{
-	u32 gcr_ext, hlreg0, i, poll;
-	u16 value;
-
-	/*
-	 * If double reset is not requested then all transactions should
-	 * already be clear and as such there is no work to do
-	 */
-	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
-		return;
-
-	/*
-	 * Set loopback enable to prevent any transmits from being sent
-	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
-	 * has already been cleared.
-	 */
-	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
-
-	/* Wait for a last completion before clearing buffers */
-	IXGBE_WRITE_FLUSH(hw);
-	msec_delay(3);
-
-	/*
-	 * Before proceeding, make sure that the PCIe block does not have
-	 * transactions pending.
-	 */
-	poll = ixgbe_pcie_timeout_poll(hw);
-	for (i = 0; i < poll; i++) {
-		usec_delay(100);
-		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
-		if (IXGBE_REMOVED(hw->hw_addr))
-			goto out;
-		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
-			goto out;
-	}
-
-out:
-	/* initiate cleaning flow for buffers in the PCIe transaction layer */
-	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
-			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
-
-	/* Flush all writes and allow 20usec for all transactions to clear */
-	IXGBE_WRITE_FLUSH(hw);
-	usec_delay(20);
-
-	/* restore previous register values */
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-}
-
-STATIC const u8 ixgbe_emc_temp_data[4] = {
-	IXGBE_EMC_INTERNAL_DATA,
-	IXGBE_EMC_DIODE1_DATA,
-	IXGBE_EMC_DIODE2_DATA,
-	IXGBE_EMC_DIODE3_DATA
-};
-STATIC const u8 ixgbe_emc_therm_limit[4] = {
-	IXGBE_EMC_INTERNAL_THERM_LIMIT,
-	IXGBE_EMC_DIODE1_THERM_LIMIT,
-	IXGBE_EMC_DIODE2_THERM_LIMIT,
-	IXGBE_EMC_DIODE3_THERM_LIMIT
-};
-
-/**
- *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
- *  @hw: pointer to hardware structure
- *
- *  Reads the thermal sensors over I2C and stores the readings in
- *  hw->mac.thermal_sensor_data.
- **/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 ets_offset;
-	u16 ets_cfg;
-	u16 ets_sensor;
-	u8  num_sensors;
-	u8  sensor_index;
-	u8  sensor_location;
-	u8  i;
-	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
-	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
-
-	/* Only support thermal sensors attached to 82599 physical port 0 */
-	if ((hw->mac.type != ixgbe_mac_82599EB) ||
-	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
-
-	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
-	if (status)
-		goto out;
-
-	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
-
-	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
-	if (status)
-		goto out;
-
-	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
-		!= IXGBE_ETS_TYPE_EMC) {
-		status = IXGBE_NOT_IMPLEMENTED;
-		goto out;
-	}
-
-	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
-	if (num_sensors > IXGBE_MAX_SENSORS)
-		num_sensors = IXGBE_MAX_SENSORS;
-
-	for (i = 0; i < num_sensors; i++) {
-		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
-					     &ets_sensor);
-		if (status)
-			goto out;
-
-		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
-				IXGBE_ETS_DATA_INDEX_SHIFT);
-		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
-				   IXGBE_ETS_DATA_LOC_SHIFT);
-
-		if (sensor_location != 0) {
-			status = hw->phy.ops.read_i2c_byte(hw,
-					ixgbe_emc_temp_data[sensor_index],
-					IXGBE_I2C_THERMAL_SENSOR_ADDR,
-					&data->sensor[i].temp);
-			if (status)
-				goto out;
-		}
-	}
-out:
-	return status;
-}
-
-/**
- *  ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
- *  @hw: pointer to hardware structure
- *
- *  Inits the thermal sensor thresholds according to the NVM map
- *  and saves off the threshold and location values into mac.thermal_sensor_data
- **/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 offset;
-	u16 ets_offset;
-	u16 ets_cfg;
-	u16 ets_sensor;
-	u8  low_thresh_delta;
-	u8  num_sensors;
-	u8  sensor_index;
-	u8  sensor_location;
-	u8  therm_limit;
-	u8  i;
-	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
-
-	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
-
-	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
-
-	/* Only support thermal sensors attached to 82599 physical port 0 */
-	if ((hw->mac.type != ixgbe_mac_82599EB) ||
-	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
-		return IXGBE_NOT_IMPLEMENTED;
-
-	offset = IXGBE_ETS_CFG;
-	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
-		goto eeprom_err;
-	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
-		return IXGBE_NOT_IMPLEMENTED;
-
-	offset = ets_offset;
-	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
-		goto eeprom_err;
-	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
-		!= IXGBE_ETS_TYPE_EMC)
-		return IXGBE_NOT_IMPLEMENTED;
-
-	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
-			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
-	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
-
-	for (i = 0; i < num_sensors; i++) {
-		offset = ets_offset + 1 + i;
-		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
-			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-				      "eeprom read at offset %d failed",
-				      offset);
-			continue;
-		}
-		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
-				IXGBE_ETS_DATA_INDEX_SHIFT);
-		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
-				   IXGBE_ETS_DATA_LOC_SHIFT);
-		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
-
-		hw->phy.ops.write_i2c_byte(hw,
-			ixgbe_emc_therm_limit[sensor_index],
-			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
-
-		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
-			data->sensor[i].location = sensor_location;
-			data->sensor[i].caution_thresh = therm_limit;
-			data->sensor[i].max_op_thresh = therm_limit -
-							low_thresh_delta;
-		}
-	}
-	return status;
-
-eeprom_err:
-	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-		      "eeprom read at offset %d failed", offset);
-	return IXGBE_NOT_IMPLEMENTED;
-}
-
-
-/**
- * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
- * @hw: pointer to hardware structure
- * @map: pointer to u8 arr for returning map
- *
- * Read the rtrup2tc HW register and resolve its content into map
- **/
-void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
-{
-	u32 reg, i;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
-	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
-		map[i] = IXGBE_RTRUP2TC_UP_MASK &
-			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
-	return;
-}
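[Editor's note] RTRUP2TC packs one user-priority-to-TC entry per field, so the loop above is a plain shift-and-mask walk over the eight fields. A small illustration, assuming the usual 3-bit field width behind IXGBE_RTRUP2TC_UP_SHIFT/IXGBE_RTRUP2TC_UP_MASK:

    u32 reg = 1 << (7 * 3);              /* only UP 7 mapped to TC 1 -> 0x00200000 */
    u8 tc7 = (reg >> (7 * 3)) & 0x7;     /* map[7] == 1, all other UPs stay on TC 0 */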
-
-void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
-{
-	u32 pfdtxgswc;
-	u32 rxctrl;
-
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	if (rxctrl & IXGBE_RXCTRL_RXEN) {
-		if (hw->mac.type != ixgbe_mac_82598EB) {
-			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
-				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
-				hw->mac.set_lben = true;
-			} else {
-				hw->mac.set_lben = false;
-			}
-		}
-		rxctrl &= ~IXGBE_RXCTRL_RXEN;
-		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-	}
-}
-
-void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
-{
-	u32 pfdtxgswc;
-	u32 rxctrl;
-
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
-
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		if (hw->mac.set_lben) {
-			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
-			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
-			hw->mac.set_lben = false;
-		}
-	}
-}
-
-/**
- * ixgbe_mng_present - returns true when management capability is present
- * @hw: pointer to hardware structure
- */
-bool ixgbe_mng_present(struct ixgbe_hw *hw)
-{
-	u32 fwsm;
-
-	if (hw->mac.type < ixgbe_mac_82599EB)
-		return false;
-
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
-	fwsm &= IXGBE_FWSM_MODE_MASK;
-	return fwsm == IXGBE_FWSM_FW_MODE_PT;
-}
-
-/**
- * ixgbe_mng_enabled - Is the manageability engine enabled?
- * @hw: pointer to hardware structure
- *
- * Returns true if the manageability engine is enabled.
- **/
-bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
-{
-	u32 fwsm, manc, factps;
-
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
-	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
-		return false;
-
-	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
-	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
-		return false;
-
-	if (hw->mac.type <= ixgbe_mac_X540) {
-		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
-		if (factps & IXGBE_FACTPS_MNGCG)
-			return false;
-	}
-
-	return true;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
deleted file mode 100644
index 9ebdd45..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
+++ /dev/null
@@ -1,183 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_COMMON_H_
-#define _IXGBE_COMMON_H_
-
-#include "ixgbe_type.h"
-#define IXGBE_WRITE_REG64(hw, reg, value) \
-	do { \
-		IXGBE_WRITE_REG(hw, reg, (u32) value); \
-		IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
-	} while (0)
-#define IXGBE_REMOVED(a) (0)
-struct ixgbe_pba {
-	u16 word[2];
-	u16 *pba_block;
-};
-
-void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
-
-u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
-				  u32 pba_num_size);
-s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
-		       u32 eeprom_buf_size, u16 max_pba_block_size,
-		       struct ixgbe_pba *pba);
-s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
-			u32 eeprom_buf_size, struct ixgbe_pba *pba);
-s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
-			     u32 eeprom_buf_size, u16 *pba_block_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
-void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
-void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-					       u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
-				   u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
-				    u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-				       u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
-					      u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
-					   u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-			  u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-				      u32 mc_addr_count,
-				      ixgbe_mc_addr_itr func, bool clear);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
-				      u32 addr_count, ixgbe_mc_addr_itr func);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
-
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
-void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-
-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
-			 u32 vind, bool vlan_on);
-s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-			   bool vlan_on, bool *vfta_changed);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
-
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
-			       ixgbe_link_speed *speed,
-			       bool *link_up, bool link_up_wait_to_complete);
-
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-				 u16 *wwpn_prefix);
-
-s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
-void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
-			     int strategy);
-void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
-				 u8 build, u8 ver);
-u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_get_hi_status(struct ixgbe_hw *hw, u8 *ret_status);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-				 u32 length, u32 timeout, bool return_data);
-
-void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
-
-extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
-extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-bool ixgbe_mng_present(struct ixgbe_hw *hw);
-bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
-
-#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
-#define IXGBE_EMC_INTERNAL_DATA		0x00
-#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
-#define IXGBE_EMC_DIODE1_DATA		0x01
-#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
-#define IXGBE_EMC_DIODE2_DATA		0x23
-#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
-#define IXGBE_EMC_DIODE3_DATA		0x2A
-#define IXGBE_EMC_DIODE3_THERM_LIMIT	0x30
-
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
-void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-#endif /* _IXGBE_COMMON_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
deleted file mode 100644
index 60c3b4e..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.c
+++ /dev/null
@@ -1,714 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-
-#include "ixgbe_type.h"
-#include "ixgbe_dcb.h"
-#include "ixgbe_dcb_82598.h"
-#include "ixgbe_dcb_82599.h"
-
-/**
- * ixgbe_dcb_calculate_tc_credits - Calculate the IEEE traffic class
- * credits from the configured bandwidth percentages. Credits
- * are the smallest unit programmable into the underlying
- * hardware. The IEEE 802.1Qaz specification does not use bandwidth
- * groups, so this is much simplified from the CEE case.
- */
-s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
-				   int max_frame_size)
-{
-	int min_percent = 100;
-	int min_credit, multiplier;
-	int i;
-
-	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
-			IXGBE_DCB_CREDIT_QUANTUM;
-
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if (bw[i] < min_percent && bw[i])
-			min_percent = bw[i];
-	}
-
-	multiplier = (min_credit / min_percent) + 1;
-
-	/* Find out the hw credits for each TC */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
-
-		if (val < min_credit)
-			val = min_credit;
-		refill[i] = (u16)val;
-
-		max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
-	}
-
-	return 0;
-}
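[Editor's note] A worked pass through the credit math above, assuming a standard 1518-byte max frame and four active TCs at 25% bandwidth each (illustrative numbers only):

    int max_frame  = 1518;
    int min_credit = ((max_frame / 2) + 64 - 1) / 64;  /* 12 quanta of 64 bytes */
    int multiplier = (min_credit / 25) + 1;             /* smallest non-zero bw% is 25 -> 1 */
    int refill     = 25 * multiplier;                    /* 25, already >= min_credit */
    int max_cred   = (25 * 400) / 100;                   /* 100, IXGBE_DCB_MAX_CREDIT == 400 */
    /* TCs with bw == 0 still get max[i] = min_credit = 12. */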
-
-/**
- * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
- * @hw: pointer to hardware structure
- * @dcb_config: Struct containing DCB settings.
- * @max_frame_size: Maximum frame size.
- * @direction: Configuring either Tx or Rx.
- *
- * This function calculates the credits allocated to each traffic class.
- * It should be called only after the rules are checked by
- * ixgbe_dcb_check_config_cee().
- */
-s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
-				   struct ixgbe_dcb_config *dcb_config,
-				   u32 max_frame_size, u8 direction)
-{
-	struct ixgbe_dcb_tc_path *p;
-	u32 min_multiplier	= 0;
-	u16 min_percent		= 100;
-	s32 ret_val =		IXGBE_SUCCESS;
-	/* Initialization values default for Tx settings */
-	u32 min_credit		= 0;
-	u32 credit_refill	= 0;
-	u32 credit_max		= 0;
-	u16 link_percentage	= 0;
-	u8  bw_percent		= 0;
-	u8  i;
-
-	if (dcb_config == NULL) {
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-	}
-
-	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
-		     IXGBE_DCB_CREDIT_QUANTUM;
-
-	/* Find smallest link percentage */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[direction];
-		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
-		link_percentage = p->bwg_percent;
-
-		link_percentage = (link_percentage * bw_percent) / 100;
-
-		if (link_percentage && link_percentage < min_percent)
-			min_percent = link_percentage;
-	}
-
-	/*
-	 * The ratio between traffic classes will control the bandwidth
-	 * percentages seen on the wire. To calculate this ratio we use
-	 * a multiplier. It is required that the refill credits must be
-	 * larger than the max frame size so here we find the smallest
-	 * multiplier that will allow all bandwidth percentages to be
-	 * greater than the max frame size.
-	 */
-	min_multiplier = (min_credit / min_percent) + 1;
-
-	/* Find out the link percentage for each TC first */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[direction];
-		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
-
-		link_percentage = p->bwg_percent;
-		/* Must be careful of integer division for very small nums */
-		link_percentage = (link_percentage * bw_percent) / 100;
-		if (p->bwg_percent > 0 && link_percentage == 0)
-			link_percentage = 1;
-
-		/* Save link_percentage for reference */
-		p->link_percent = (u8)link_percentage;
-
-		/* Calculate credit refill ratio using multiplier */
-		credit_refill = min(link_percentage * min_multiplier,
-				    (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
-		p->data_credits_refill = (u16)credit_refill;
-
-		/* Calculate maximum credit for the TC */
-		credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
-
-		/*
-		 * Adjustment based on rule checking, if the percentage
-		 * of a TC is too small, the maximum credit may not be
-		 * enough to send out a jumbo frame in data plane arbitration.
-		 */
-		if (credit_max && (credit_max < min_credit))
-			credit_max = min_credit;
-
-		if (direction == IXGBE_DCB_TX_CONFIG) {
-			/*
-			 * Adjustment based on rule checking, if the
-			 * percentage of a TC is too small, the maximum
-			 * credit may not be enough to send out a TSO
-			 * packet in descriptor plane arbitration.
-			 */
-			if (credit_max && (credit_max <
-			    IXGBE_DCB_MIN_TSO_CREDIT)
-			    && (hw->mac.type == ixgbe_mac_82598EB))
-				credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
-
-			dcb_config->tc_config[i].desc_credits_max =
-								(u16)credit_max;
-		}
-
-		p->data_credits_max = (u16)credit_max;
-	}
-
-out:
-	return ret_val;
-}
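[Editor's note] The CEE variant adds one step over the helper above: a TC's link share is its percentage of its bandwidth group, scaled by that group's share of the link. A hedged example with two groups split 60/40 and four TCs per group at 25% each (again assuming a 1518-byte max frame, so min_credit = 12):

    /* TCs 0-3 (BWG 0): link_percentage = 25 * 60 / 100 = 15 */
    /* TCs 4-7 (BWG 1): link_percentage = 25 * 40 / 100 = 10 */
    /* min_percent = 10  ->  min_multiplier = 12 / 10 + 1 = 2 */
    /* refill: TCs 0-3 = min(15 * 2, 200) = 30,  TCs 4-7 = 20 */
    /* max:    TCs 0-3 = 15 * 400 / 100  = 60,  TCs 4-7 = 40 */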
-
-/**
- * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
- * @cfg: dcb configuration to unpack into hardware consumable fields
- * @map: user priority to traffic class map
- * @pfc_up: u8 to store user priority PFC bitmask
- *
- * This unpacks the dcb configuration PFC info, which is stored per
- * traffic class, into an 8-bit user priority bitmask that can be
- * consumed by hardware routines. The priority to tc map must be
- * updated before calling this routine so the current UP-to-TC mapping is used.
- */
-void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	int up;
-
-	/*
-	 * If the TC for this user priority has PFC enabled then set the
-	 * matching bit in 'pfc_up' to reflect that PFC is enabled.
-	 */
-	for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
-		if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
-			*pfc_up |= 1 << up;
-	}
-}
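[Editor's note] The resulting bitmask is indexed by user priority, not by traffic class. For instance, with the UP-to-TC map {0, 0, 1, 1, 2, 2, 3, 3} and PFC enabled only on TCs 1 and 3, the loop above produces:

    u8 pfc_up = (1 << 2) | (1 << 3) | (1 << 6) | (1 << 7);   /* 0xCC: UPs 2, 3, 6, 7 */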
-
-void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
-			     u16 *refill)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	int tc;
-
-	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
-		refill[tc] = tc_config[tc].path[direction].data_credits_refill;
-}
-
-void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	int tc;
-
-	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
-		max[tc] = tc_config[tc].desc_credits_max;
-}
-
-void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
-			    u8 *bwgid)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	int tc;
-
-	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
-		bwgid[tc] = tc_config[tc].path[direction].bwg_id;
-}
-
-void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
-			   u8 *tsa)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	int tc;
-
-	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
-		tsa[tc] = tc_config[tc].path[direction].tsa;
-}
-
-u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
-{
-	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
-	u8 prio_mask = 1 << up;
-	u8 tc = cfg->num_tcs.pg_tcs;
-
-	/* If tc is 0 then DCB is likely not enabled or supported */
-	if (!tc)
-		goto out;
-
-	/*
-	 * Test from maximum TC to 1 and report the first match we find.  If
-	 * we find no match we can assume that the TC is 0 since the TC must
-	 * be set for all user priorities
-	 */
-	for (tc--; tc; tc--) {
-		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
-			break;
-	}
-out:
-	return tc;
-}
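[Editor's note] ixgbe_dcb_get_tc_from_up() scans from the highest configured TC downward and returns the first TC whose up_to_tc_bitmap contains the priority; priorities that match nothing fall back to TC 0. A sketch, assuming four TCs covering two priorities apiece (bitmaps 0x03, 0x0C, 0x30, 0xC0):

    u8 up = 5;
    u8 prio_mask = 1 << up;   /* 0x20 -- first hit scanning down is TC 2 (bitmap 0x30) */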
-
-void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
-			      u8 *map)
-{
-	u8 up;
-
-	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
-		map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
-}
-
-/**
- * ixgbe_dcb_check_config_cee - Check DCB rules for a DCB configuration
- * @dcb_config: Pointer to DCB config structure
- *
- * This function checks DCB rules for DCB settings.
- * The following rules are checked:
- * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
- * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
- *    Group must total 100%.
- * 3. A Traffic Class should not be set to both Link Strict Priority
- *    and Group Strict Priority.
- * 4. Link strict Bandwidth Groups can only have link strict traffic classes
- *    with zero bandwidth.
- */
-s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
-{
-	struct ixgbe_dcb_tc_path *p;
-	s32 ret_val = IXGBE_SUCCESS;
-	u8 i, j, bw = 0, bw_id;
-	u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
-	bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
-
-	memset(bw_sum, 0, sizeof(bw_sum));
-	memset(link_strict, 0, sizeof(link_strict));
-
-	/* First Tx, then Rx */
-	for (i = 0; i < 2; i++) {
-		/* Check each traffic class for rule violation */
-		for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
-			p = &dcb_config->tc_config[j].path[i];
-
-			bw = p->bwg_percent;
-			bw_id = p->bwg_id;
-
-			if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
-				ret_val = IXGBE_ERR_CONFIG;
-				goto err_config;
-			}
-			if (p->tsa == ixgbe_dcb_tsa_strict) {
-				link_strict[i][bw_id] = true;
-				/* Link strict should have zero bandwidth */
-				if (bw) {
-					ret_val = IXGBE_ERR_CONFIG;
-					goto err_config;
-				}
-			} else if (!bw) {
-				/*
-				 * Traffic classes without link strict
-				 * should have non-zero bandwidth.
-				 */
-				ret_val = IXGBE_ERR_CONFIG;
-				goto err_config;
-			}
-			bw_sum[i][bw_id] += bw;
-		}
-
-		bw = 0;
-
-		/* Check each bandwidth group for rule violation */
-		for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
-			bw += dcb_config->bw_percentage[i][j];
-			/*
-			 * Sum of bandwidth percentages of all traffic classes
-			 * within a Bandwidth Group must total 100 except for
-			 * link strict group (zero bandwidth).
-			 */
-			if (link_strict[i][j]) {
-				if (bw_sum[i][j]) {
-					/*
-					 * Link strict group should have zero
-					 * bandwidth.
-					 */
-					ret_val = IXGBE_ERR_CONFIG;
-					goto err_config;
-				}
-			} else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
-				   bw_sum[i][j] != 0) {
-				ret_val = IXGBE_ERR_CONFIG;
-				goto err_config;
-			}
-		}
-
-		if (bw != IXGBE_DCB_BW_PERCENT) {
-			ret_val = IXGBE_ERR_CONFIG;
-			goto err_config;
-		}
-	}
-
-err_config:
-	DEBUGOUT2("DCB error code %d while checking %s settings.\n",
-		  ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
-
-	return ret_val;
-}
-
-/**
- * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of traffic classes for which to return statistics.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
-			   u8 tc_count)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of traffic classes for which to return statistics.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
-			    u8 tc_count)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Rx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
-				struct ixgbe_dcb_config *dcb_config)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
-	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
-	u8 map[IXGBE_DCB_MAX_USER_PRIORITY]	= { 0 };
-	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
-	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
-
-	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
-	ixgbe_dcb_unpack_max_cee(dcb_config, max);
-	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
-	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
-							tsa, map);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Descriptor Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
-				     struct ixgbe_dcb_config *dcb_config)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-
-	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
-	ixgbe_dcb_unpack_max_cee(dcb_config, max);
-	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
-	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
-							     bwgid, tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
-							     bwgid, tsa);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
-				     struct ixgbe_dcb_config *dcb_config)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
-	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-
-	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
-	ixgbe_dcb_unpack_max_cee(dcb_config, max);
-	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
-	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
-							     bwgid, tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
-							     bwgid, tsa,
-							     map);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_config_pfc_cee - Config priority flow control
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Priority Flow Control for each traffic class.
- */
-s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
-			 struct ixgbe_dcb_config *dcb_config)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	u8 pfc_en;
-	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
-
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
-	ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_config_tc_stats - Config traffic class statistics
- * @hw: pointer to hardware structure
- *
- * Configure queue statistics registers; all queues belonging to the same
- * traffic class use a single set of queue statistics counters.
- */
-s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_tc_stats_82598(hw);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-/**
- * ixgbe_dcb_hw_config_cee - Config and enable DCB
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure dcb settings and enable dcb mode.
- */
-s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
-			struct ixgbe_dcb_config *dcb_config)
-{
-	s32 ret = IXGBE_NOT_IMPLEMENTED;
-	u8 pfc_en;
-	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
-	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-
-	/* Unpack CEE standard containers */
-	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
-	ixgbe_dcb_unpack_max_cee(dcb_config, max);
-	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
-	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
-						refill, max, bwgid, tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ixgbe_dcb_config_82599(hw, dcb_config);
-		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
-						refill, max, bwgid,
-						tsa, map);
-
-		ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
-		break;
-	default:
-		break;
-	}
-
-	if (!ret && dcb_config->pfc_mode_enable) {
-		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
-	}
-
-	return ret;
-}
-
-/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
-{
-	int ret = IXGBE_ERR_PARAM;
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
-		break;
-	default:
-		break;
-	}
-	return ret;
-}
-
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
-			    u8 *bwg_id, u8 *tsa, u8 *map)
-{
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
-		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
-						       tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
-						       tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
-						  tsa, map);
-		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
-						       tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
-						       tsa, map);
-		break;
-	default:
-		break;
-	}
-	return 0;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
deleted file mode 100644
index 47c593f..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb.h
+++ /dev/null
@@ -1,174 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_DCB_H_
-#define _IXGBE_DCB_H_
-
-#include "ixgbe_type.h"
-
-/* DCB defines */
-/* DCB credit calculation defines */
-#define IXGBE_DCB_CREDIT_QUANTUM	64
-#define IXGBE_DCB_MAX_CREDIT_REFILL	200   /* 200 * 64B = 12800B */
-#define IXGBE_DCB_MAX_TSO_SIZE		(32 * 1024) /* Max TSO pkt size in DCB*/
-#define IXGBE_DCB_MAX_CREDIT		(2 * IXGBE_DCB_MAX_CREDIT_REFILL)
-
-/* 513 for 32KB TSO packet */
-#define IXGBE_DCB_MIN_TSO_CREDIT	\
-	((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
-
-/* DCB configuration defines */
-#define IXGBE_DCB_MAX_USER_PRIORITY	8
-#define IXGBE_DCB_MAX_BW_GROUP		8
-#define IXGBE_DCB_BW_PERCENT		100
-
-#define IXGBE_DCB_TX_CONFIG		0
-#define IXGBE_DCB_RX_CONFIG		1
-
-/* DCB capability defines */
-#define IXGBE_DCB_PG_SUPPORT	0x00000001
-#define IXGBE_DCB_PFC_SUPPORT	0x00000002
-#define IXGBE_DCB_BCN_SUPPORT	0x00000004
-#define IXGBE_DCB_UP2TC_SUPPORT	0x00000008
-#define IXGBE_DCB_GSP_SUPPORT	0x00000010
-
-struct ixgbe_dcb_support {
-	u32 capabilities; /* DCB capabilities */
-
-	/* Each bit represents a number of TCs configurable in the hw.
-	 * If 8 traffic classes can be configured, the value is 0x80. */
-	u8 traffic_classes;
-	u8 pfc_traffic_classes;
-};
-
-enum ixgbe_dcb_tsa {
-	ixgbe_dcb_tsa_ets = 0,
-	ixgbe_dcb_tsa_group_strict_cee,
-	ixgbe_dcb_tsa_strict
-};
-
-/* Traffic class bandwidth allocation per direction */
-struct ixgbe_dcb_tc_path {
-	u8 bwg_id; /* Bandwidth Group (BWG) ID */
-	u8 bwg_percent; /* % of BWG's bandwidth */
-	u8 link_percent; /* % of link bandwidth */
-	u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
-	u16 data_credits_refill; /* Credit refill amount in 64B granularity */
-	u16 data_credits_max; /* Max credits for a configured packet buffer
-			       * in 64B granularity.*/
-	enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
-};
-
-enum ixgbe_dcb_pfc {
-	ixgbe_dcb_pfc_disabled = 0,
-	ixgbe_dcb_pfc_enabled,
-	ixgbe_dcb_pfc_enabled_txonly,
-	ixgbe_dcb_pfc_enabled_rxonly
-};
-
-/* Traffic class configuration */
-struct ixgbe_dcb_tc_config {
-	struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
-	enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
-
-	u16 desc_credits_max; /* For Tx Descriptor arbitration */
-	u8 tc; /* Traffic class (TC) */
-};
-
-enum ixgbe_dcb_pba {
-	/* PBA[0-7] each use 64KB FIFO */
-	ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
-	/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
-	ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
-};
-
-struct ixgbe_dcb_num_tcs {
-	u8 pg_tcs;
-	u8 pfc_tcs;
-};
-
-struct ixgbe_dcb_config {
-	struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
-	struct ixgbe_dcb_support support;
-	struct ixgbe_dcb_num_tcs num_tcs;
-	u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
-	bool pfc_mode_enable;
-	bool round_robin_enable;
-
-	enum ixgbe_dcb_pba rx_pba_cfg;
-
-	u32 dcb_cfg_version; /* Not used...OS-specific? */
-	u32 link_speed; /* For bandwidth allocation validation purpose */
-	bool vt_mode;
-};
-
-/* DCB driver APIs */
-
-/* DCB rule checking */
-s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
-
-/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
-s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
-				       struct ixgbe_dcb_config *, u32, u8);
-
-/* DCB PFC */
-s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
-s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-
-/* DCB stats */
-s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
-s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
-					 struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
-					 struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
-				    struct ixgbe_dcb_config *);
-
-/* DCB unpack routines */
-void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
-void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
-void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
-void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
-void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
-u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
-
-/* DCB initialization */
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
-s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
-#endif /* _IXGBE_DCB_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
deleted file mode 100644
index 08d44d4..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.c
+++ /dev/null
@@ -1,360 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-
-#include "ixgbe_type.h"
-#include "ixgbe_dcb.h"
-#include "ixgbe_dcb_82598.h"
-
-/**
- * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of traffic classes for which to return statistics.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
-				 struct ixgbe_hw_stats *stats,
-				 u8 tc_count)
-{
-	int tc;
-
-	DEBUGFUNC("dcb_get_tc_stats");
-
-	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
-		return IXGBE_ERR_PARAM;
-
-	/* Statistics pertaining to each traffic class */
-	for (tc = 0; tc < tc_count; tc++) {
-		/* Transmitted Packets */
-		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
-		/* Transmitted Bytes */
-		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
-		/* Received Packets */
-		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
-		/* Received Bytes */
-		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
-
-#if 0
-		/* Can we get rid of these??  Consequently, getting rid
-		 * of the tc_stats structure.
-		 */
-		tc_stats_array[up]->in_overflow_discards = 0;
-		tc_stats_array[up]->out_overflow_discards = 0;
-#endif
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count: Number of traffic classes for which to return statistics.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
-				  struct ixgbe_hw_stats *stats,
-				  u8 tc_count)
-{
-	int tc;
-
-	DEBUGFUNC("dcb_get_pfc_stats");
-
-	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
-		return IXGBE_ERR_PARAM;
-
-	for (tc = 0; tc < tc_count; tc++) {
-		/* Priority XOFF Transmitted */
-		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
-		/* Priority XOFF Received */
-		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
- * @hw: pointer to hardware structure
- * @refill: refill credits per traffic class
- * @max: max credits per traffic class
- * @tsa: transmission selection algorithm per traffic class
- *
- * Configure Rx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
-				      u16 *max, u8 *tsa)
-{
-	u32 reg = 0;
-	u32 credit_refill = 0;
-	u32 credit_max = 0;
-	u8 i = 0;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
-	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	/* Enable Arbiter */
-	reg &= ~IXGBE_RMCS_ARBDIS;
-	/* Enable Receive Recycle within the BWG */
-	reg |= IXGBE_RMCS_RRM;
-	/* Enable Deficit Fixed Priority arbitration*/
-	reg |= IXGBE_RMCS_DFP;
-
-	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		credit_refill = refill[i];
-		credit_max = max[i];
-
-		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_RT2CR_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
-	}
-
-	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
-	reg |= IXGBE_RDRXCTL_MPBEN;
-	reg |= IXGBE_RDRXCTL_MCEN;
-	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	/* Make sure there are enough descriptors before arbitration */
-	reg &= ~IXGBE_RXCTRL_DMBYPS;
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
- * @hw: pointer to hardware structure
- * @refill: refill credits per traffic class
- * @max: max credits per traffic class
- * @bwg_id: bandwidth grouping per traffic class
- * @tsa: transmission selection algorithm per traffic class
- *
- * Configure Tx Descriptor Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-					   u16 *refill, u16 *max, u8 *bwg_id,
-					   u8 *tsa)
-{
-	u32 reg, max_credits;
-	u8 i;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-
-	/* Enable arbiter */
-	reg &= ~IXGBE_DPMCS_ARBDIS;
-	reg |= IXGBE_DPMCS_TSOEF;
-
-	/* Configure Max TSO packet size 34KB including payload and headers */
-	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
-
-	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		max_credits = max[i];
-		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
-		reg |= refill[i];
-		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
-
-		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
-			reg |= IXGBE_TDTQ2TCCR_GSP;
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_TDTQ2TCCR_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
- * @hw: pointer to hardware structure
- * @refill: refill credits per traffic class
- * @max: max credits per traffic class
- * @bwg_id: bandwidth grouping per traffic class
- * @tsa: transmission selection algorithm per traffic class
- *
- * Configure Tx Data Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-					   u16 *refill, u16 *max, u8 *bwg_id,
-					   u8 *tsa)
-{
-	u32 reg;
-	u8 i;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-	/* Enable Data Plane Arbiter */
-	reg &= ~IXGBE_PDPMCS_ARBDIS;
-	/* Enable DFP and Transmit Recycle Mode */
-	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
-
-	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		reg = refill[i];
-		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
-		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
-
-		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
-			reg |= IXGBE_TDPT2TCCR_GSP;
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_TDPT2TCCR_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
-	}
-
-	/* Enable Tx packet buffer division */
-	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
-	reg |= IXGBE_DTXCTL_ENDBUBD;
-	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_pfc_82598 - Config priority flow control
- * @hw: pointer to hardware structure
- * @pfc_en: enabled PFC bitmask, one bit per traffic class
- *
- * Configure Priority Flow Control for each traffic class.
- */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
-{
-	u32 fcrtl, reg;
-	u8 i;
-
-	/* Enable Transmit Priority Flow Control */
-	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-	reg &= ~IXGBE_RMCS_TFCE_802_3X;
-	reg |= IXGBE_RMCS_TFCE_PRIORITY;
-	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
-
-	/* Enable Receive Priority Flow Control */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
-
-	if (pfc_en)
-		reg |= IXGBE_FCTRL_RPFCE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
-
-	/* Configure PFC Tx thresholds per TC */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		if (!(pfc_en & (1 << i))) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
-			continue;
-		}
-
-		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
-		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
-	}
-
-	/* Configure pause time */
-	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-	return IXGBE_SUCCESS;
-}
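[Editor's note] For each PFC-enabled traffic class the loop above programs the XON/XOFF water marks; the shift by 10 assumes hw->fc.low_water/high_water are tracked in KB, as in the upstream base driver, and converts them to the byte-granular FCRTL/FCRTH fields. Illustrative values only:

    u32 fcrtl = (192 << 10) | IXGBE_FCRTL_XONE;   /* XON at an assumed 192 KB */
    u32 fcrth = (320 << 10) | IXGBE_FCRTH_FCEN;   /* XOFF at an assumed 320 KB */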
-
-/**
- * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
- * @hw: pointer to hardware structure
- *
- * Configure queue statistics registers; all queues belonging to the same
- * traffic class use a single set of queue statistics counters.
- */
-s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
-{
-	u32 reg = 0;
-	u8 i = 0;
-	u8 j = 0;
-
-	/* Receive Queues stats setting -  8 queues per statistics reg */
-	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
-		reg |= ((0x1010101) * j);
-		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
-		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
-		reg |= ((0x1010101) * j);
-		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
-	}
-	/* Transmit Queues stats setting -  4 queues per statistics reg*/
-	for (i = 0; i < 8; i++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
-		reg |= ((0x1010101) * i);
-		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_hw_config_82598 - Config and enable DCB
- * @hw: pointer to hardware structure
- * @link_speed: unused
- * @refill: refill credits per traffic class
- * @max: max credits per traffic class
- * @bwg_id: bandwidth grouping per traffic class
- * @tsa: transmission selection algorithm per traffic class
- *
- * Configure dcb settings and enable dcb mode.
- */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
-			      u16 *refill, u16 *max, u8 *bwg_id,
-			      u8 *tsa)
-{
-	UNREFERENCED_1PARAMETER(link_speed);
-
-	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
-	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
-					       tsa);
-	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
-					       tsa);
-	ixgbe_dcb_config_tc_stats_82598(hw);
-
-
-	return IXGBE_SUCCESS;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
deleted file mode 100644
index 06ffaa4..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82598.h
+++ /dev/null
@@ -1,99 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_DCB_82598_H_
-#define _IXGBE_DCB_82598_H_
-
-/* DCB register definitions */
-
-#define IXGBE_DPMCS_MTSOS_SHIFT	16
-#define IXGBE_DPMCS_TDPAC	0x00000001 /* 0 Round Robin,
-					    * 1 DFP - Deficit Fixed Priority */
-#define IXGBE_DPMCS_TRM		0x00000010 /* Transmit Recycle Mode */
-#define IXGBE_DPMCS_ARBDIS	0x00000040 /* DCB arbiter disable */
-#define IXGBE_DPMCS_TSOEF	0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
-
-#define IXGBE_RUPPBMR_MQA	0x80000000 /* Enable UP to queue mapping */
-
-#define IXGBE_RT2CR_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
-#define IXGBE_RT2CR_LSP		0x80000000 /* LSP enable bit */
-
-#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
-					    * buffers enable */
-#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
-					    * (RSS) enable */
-
-#define IXGBE_TDTQ2TCCR_MCL_SHIFT	12
-#define IXGBE_TDTQ2TCCR_BWG_SHIFT	9
-#define IXGBE_TDTQ2TCCR_GSP	0x40000000
-#define IXGBE_TDTQ2TCCR_LSP	0x80000000
-
-#define IXGBE_TDPT2TCCR_MCL_SHIFT	12
-#define IXGBE_TDPT2TCCR_BWG_SHIFT	9
-#define IXGBE_TDPT2TCCR_GSP	0x40000000
-#define IXGBE_TDPT2TCCR_LSP	0x80000000
-
-#define IXGBE_PDPMCS_TPPAC	0x00000020 /* 0 Round Robin,
-					    * 1 DFP - Deficit Fixed Priority */
-#define IXGBE_PDPMCS_ARBDIS	0x00000040 /* Arbiter disable */
-#define IXGBE_PDPMCS_TRM	0x00000100 /* Transmit Recycle Mode enable */
-
-#define IXGBE_DTXCTL_ENDBUBD	0x00000004 /* Enable DBU buffer division */
-
-#define IXGBE_TXPBSIZE_40KB	0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB	0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB	0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB	0x00014000 /* 80KB Packet Buffer */
-
-/* DCB driver APIs */
-
-/* DCB PFC */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
-
-/* DCB stats */
-s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
-s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
-				 struct ixgbe_hw_stats *, u8);
-s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
-				  struct ixgbe_hw_stats *, u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
-					   u8 *, u8 *);
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
-					   u8 *, u8 *);
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
-
-/* DCB initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
-#endif /* _IXGBE_DCB_82598_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
deleted file mode 100644
index 9778164..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.c
+++ /dev/null
@@ -1,593 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-
-#include "ixgbe_type.h"
-#include "ixgbe_dcb.h"
-#include "ixgbe_dcb_82599.h"
-
-/**
- * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count:  Number of traffic classes for which to return statistics.
- *
- * This function returns the status data for each of the Traffic Classes in use.
- */
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
-				 struct ixgbe_hw_stats *stats,
-				 u8 tc_count)
-{
-	int tc;
-
-	DEBUGFUNC("dcb_get_tc_stats");
-
-	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
-		return IXGBE_ERR_PARAM;
-
-	/* Statistics pertaining to each traffic class */
-	for (tc = 0; tc < tc_count; tc++) {
-		/* Transmitted Packets */
-		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
-		/* Transmitted Bytes (read low first to prevent missed carry) */
-		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
-		stats->qbtc[tc] +=
-			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
-		/* Received Packets */
-		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
-		/* Received Bytes (read low first to prevent missed carry) */
-		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
-		stats->qbrc[tc] +=
-			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
-
-		/* Received Dropped Packet */
-		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
- * @hw: pointer to hardware structure
- * @stats: pointer to statistics structure
- * @tc_count:  Number of traffic classes for which to return statistics.
- *
- * This function returns the CBFC status data for each of the Traffic Classes.
- */
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
-				  struct ixgbe_hw_stats *stats,
-				  u8 tc_count)
-{
-	int tc;
-
-	DEBUGFUNC("dcb_get_pfc_stats");
-
-	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
-		return IXGBE_ERR_PARAM;
-
-	for (tc = 0; tc < tc_count; tc++) {
-		/* Priority XOFF Transmitted */
-		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
-		/* Priority XOFF Received */
-		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Rx Packet Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
-				      u16 *max, u8 *bwg_id, u8 *tsa,
-				      u8 *map)
-{
-	u32 reg = 0;
-	u32 credit_refill = 0;
-	u32 credit_max = 0;
-	u8  i = 0;
-
-	/*
-	 * Disable the arbiter before changing parameters
-	 * (always enable recycle mode; WSP)
-	 */
-	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-	/*
- * Map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
- * bits set for the UPs that need to be mapped to that TC.
- * E.g. if priorities 6 and 7 are to be mapped to a TC then the
- * up_to_tc_bitmap value for that TC will be 11000000 in binary.
-	 */
-	reg = 0;
-	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
-		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
-
-	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		credit_refill = refill[i];
-		credit_max = max[i];
-		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
-
-		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_RTRPT4C_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
-	}
-
-	/*
-	 * Configure Rx packet plane (recycle mode; WSP) and
-	 * enable arbiter
-	 */
-	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
-	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-	return IXGBE_SUCCESS;
-}
-
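For reference, the RTRUP2TC write above packs one 3-bit traffic-class field
per user priority. Below is a minimal standalone sketch (illustrative only,
not part of the patch) of that packing; the local constants stand in for
IXGBE_RTRUP2TC_UP_SHIFT and IXGBE_DCB_MAX_USER_PRIORITY, and the example
map[] is made up for the example.

#include <stdint.h>
#include <stdio.h>

#define RTRUP2TC_UP_SHIFT	3	/* stand-in for IXGBE_RTRUP2TC_UP_SHIFT */
#define MAX_USER_PRIORITY	8	/* stand-in for IXGBE_DCB_MAX_USER_PRIORITY */

int main(void)
{
	/* Example: priorities 0-5 use TC0, priorities 6 and 7 use TC1. */
	uint8_t map[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 0, 0, 1, 1 };
	unsigned int reg = 0;
	int i;

	/* Same packing as the loop above: 3 bits per user priority. */
	for (i = 0; i < MAX_USER_PRIORITY; i++)
		reg |= (unsigned int)map[i] << (i * RTRUP2TC_UP_SHIFT);

	printf("RTRUP2TC value: 0x%08x\n", reg);	/* prints 0x00240000 */
	return 0;
}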
-/**
- * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Descriptor Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
-					   u16 *max, u8 *bwg_id, u8 *tsa)
-{
-	u32 reg, max_credits;
-	u8  i;
-
-	/* Clear the per-Tx queue credits; we use per-TC instead */
-	for (i = 0; i < 128; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
-	}
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		max_credits = max[i];
-		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-		reg |= refill[i];
-		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
-
-		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
-			reg |= IXGBE_RTTDT2C_GSP;
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_RTTDT2C_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
-	}
-
-	/*
-	 * Configure Tx descriptor plane (recycle mode; WSP) and
-	 * enable arbiter
-	 */
-	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure Tx Packet Arbiter and credits for each traffic class.
- */
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
-					   u16 *max, u8 *bwg_id, u8 *tsa,
-					   u8 *map)
-{
-	u32 reg;
-	u8 i;
-
-	/*
-	 * Disable the arbiter before changing parameters
-	 * (always enable recycle mode; SP; arb delay)
-	 */
-	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
-	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
-	      IXGBE_RTTPCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
-
-	/*
- * Map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
- * bits set for the UPs that need to be mapped to that TC.
- * E.g. if priorities 6 and 7 are to be mapped to a TC then the
- * up_to_tc_bitmap value for that TC will be 11000000 in binary.
-	 */
-	reg = 0;
-	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
-		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
-
-	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
-
-	/* Configure traffic class credits and priority */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		reg = refill[i];
-		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
-		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
-
-		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
-			reg |= IXGBE_RTTPT2C_GSP;
-
-		if (tsa[i] == ixgbe_dcb_tsa_strict)
-			reg |= IXGBE_RTTPT2C_LSP;
-
-		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
-	}
-
-	/*
-	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
-	 * enable arbiter
-	 */
-	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
-	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
-	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
- * @hw: pointer to hardware structure
- * @pfc_en: enabled pfc bitmask
- * @map: priority to tc assignments indexed by priority
- *
- * Configure Priority Flow Control (PFC) for each traffic class.
- */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
-{
-	u32 i, j, fcrtl, reg;
-	u8 max_tc = 0;
-
-	/* Enable Transmit Priority Flow Control */
-	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
-
-	/* Enable Receive Priority Flow Control */
-	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	reg |= IXGBE_MFLCN_DPF;
-
-	/*
-	 * X540 supports per TC Rx priority flow control.  So
-	 * clear all TCs and only enable those that should be
-	 * enabled.
-	 */
-	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
-
-	if (hw->mac.type >= ixgbe_mac_X540)
-		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
-
-	if (pfc_en)
-		reg |= IXGBE_MFLCN_RPFCE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-
-	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
-		if (map[i] > max_tc)
-			max_tc = map[i];
-	}
-
-
-	/* Configure PFC Tx thresholds per TC */
-	for (i = 0; i <= max_tc; i++) {
-		int enabled = 0;
-
-		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
-			if ((map[j] == i) && (pfc_en & (1 << j))) {
-				enabled = 1;
-				break;
-			}
-		}
-
-		if (enabled) {
-			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
-			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
-		} else {
-			/*
-			 * In order to prevent Tx hangs when the internal Tx
-			 * switch is enabled we must set the high water mark
-			 * to the Rx packet buffer size - 24KB.  This allows
-			 * the Tx switch to function even under heavy Rx
-			 * workloads.
-			 */
-			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
-		}
-
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
-	}
-
-	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
-	}
-
-	/* Configure pause time (2 TCs per register) */
-	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-	return IXGBE_SUCCESS;
-}
-
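The per-TC loop in ixgbe_dcb_config_pfc_82599() above enables thresholds for
a traffic class only if some user priority mapped to that class has its
pfc_en bit set. The following self-contained sketch (illustrative only, not
part of the patch) reproduces just that decision; the pfc_en and map[]
values are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define MAX_USER_PRIORITY	8	/* stand-in for IXGBE_DCB_MAX_USER_PRIORITY */

int main(void)
{
	uint8_t pfc_en = 0xC0;	/* PFC requested for priorities 6 and 7 */
	uint8_t map[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 0, 0, 1, 1 };
	uint8_t max_tc = 0;
	unsigned int i, j;

	for (i = 0; i < MAX_USER_PRIORITY; i++)
		if (map[i] > max_tc)
			max_tc = map[i];

	/* Same test as above: a TC is PFC-enabled if any priority mapped
	 * to it has its bit set in pfc_en. */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < MAX_USER_PRIORITY; j++)
			if (map[j] == i && (pfc_en & (1u << j)))
				enabled = 1;

		printf("TC%u: PFC %s\n", i, enabled ? "on" : "off");
	}
	return 0;
}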
-/**
- * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
- * @hw: pointer to hardware structure
- *
- * Configure queue statistics registers; all queues belonging to the same
- * traffic class use a single set of queue statistics counters.
- */
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
-				    struct ixgbe_dcb_config *dcb_config)
-{
-	u32 reg = 0;
-	u8  i   = 0;
-	u8 tc_count = 8;
-	bool vt_mode = false;
-
-	if (dcb_config != NULL) {
-		tc_count = dcb_config->num_tcs.pg_tcs;
-		vt_mode = dcb_config->vt_mode;
-	}
-
-	if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
-		return IXGBE_ERR_PARAM;
-
-	if (tc_count == 8 && vt_mode == false) {
-		/*
-		 * Receive Queues stats setting
-		 * 32 RQSMR registers, each configuring 4 queues.
-		 *
-		 * Set all 16 queues of each TC to the same stat
-		 * with TC 'n' going to stat 'n'.
-		 */
-		for (i = 0; i < 32; i++) {
-			reg = 0x01010101 * (i / 4);
-			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
-		}
-		/*
-		 * Transmit Queues stats setting
-		 * 32 TQSM registers, each controlling 4 queues.
-		 *
-		 * Set all queues of each TC to the same stat
-		 * with TC 'n' going to stat 'n'.
-		 * Tx queues are allocated non-uniformly to TCs:
-		 * 32, 32, 16, 16, 8, 8, 8, 8.
-		 */
-		for (i = 0; i < 32; i++) {
-			if (i < 8)
-				reg = 0x00000000;
-			else if (i < 16)
-				reg = 0x01010101;
-			else if (i < 20)
-				reg = 0x02020202;
-			else if (i < 24)
-				reg = 0x03030303;
-			else if (i < 26)
-				reg = 0x04040404;
-			else if (i < 28)
-				reg = 0x05050505;
-			else if (i < 30)
-				reg = 0x06060606;
-			else
-				reg = 0x07070707;
-			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
-		}
-	} else if (tc_count == 4 && vt_mode == false) {
-		/*
-		 * Receive Queues stats setting
-		 * 32 RQSMR registers, each configuring 4 queues.
-		 *
-		 * Set all 16 queues of each TC to the same stat
-		 * with TC 'n' going to stat 'n'.
-		 */
-		for (i = 0; i < 32; i++) {
-			if (i % 8 > 3)
-				/* In 4 TC mode, odd 16-queue ranges are
-				 * not used.
-				 */
-				continue;
-			reg = 0x01010101 * (i / 8);
-			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
-		}
-		/*
-		 * Transmit Queues stats setting
-		 * 32 TQSM registers, each controlling 4 queues.
-		 *
-		 * Set all queues of each TC to the same stat
-		 * with TC 'n' going to stat 'n'.
-		 * Tx queues are allocated non-uniformly to TCs:
-		 * 64, 32, 16, 16.
-		 */
-		for (i = 0; i < 32; i++) {
-			if (i < 16)
-				reg = 0x00000000;
-			else if (i < 24)
-				reg = 0x01010101;
-			else if (i < 28)
-				reg = 0x02020202;
-			else
-				reg = 0x03030303;
-			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
-		}
-	} else if (tc_count == 4 && vt_mode == true) {
-		/*
-		 * Receive Queues stats setting
-		 * 32 RQSMR registers, each configuring 4 queues.
-		 *
-		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
-		 * pool. Set all 32 queues of each TC across pools to the same
-		 * stat with TC 'n' going to stat 'n'.
-		 */
-		for (i = 0; i < 32; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
-		/*
-		 * Transmit Queues stats setting
-		 * 32 TQSM registers, each controlling 4 queues.
-		 *
-		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
-		 * pool. Set all 32 queues of each TC across pools to the same
-		 * stat with TC 'n' going to stat 'n'.
-		 */
-		for (i = 0; i < 32; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
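In the 8-TC, non-VT case above, each RQSMR register covers four Rx queues
and each traffic class owns sixteen queues, so register i receives the
value 0x01010101 * (i / 4). A standalone sketch of that computation
(illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int reg;
	int i;

	/* 32 RQSMR registers, 4 queues each; TC n maps to stat counter n. */
	for (i = 0; i < 32; i++) {
		reg = 0x01010101u * (unsigned int)(i / 4);
		printf("RQSMR[%2d] = 0x%08x\n", i, reg);
	}
	return 0;
}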
-/**
- * ixgbe_dcb_config_82599 - Configure general DCB parameters
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure general DCB parameters.
- */
-s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
-			   struct ixgbe_dcb_config *dcb_config)
-{
-	u32 reg;
-	u32 q;
-
-	/* Disable the Tx desc arbiter so that MTQC can be changed */
-	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-	reg |= IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	if (dcb_config->num_tcs.pg_tcs == 8) {
-		/* Enable DCB for Rx with 8 TCs */
-		switch (reg & IXGBE_MRQC_MRQE_MASK) {
-		case 0:
-		case IXGBE_MRQC_RT4TCEN:
-			/* RSS disabled cases */
-			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-			      IXGBE_MRQC_RT8TCEN;
-			break;
-		case IXGBE_MRQC_RSSEN:
-		case IXGBE_MRQC_RTRSS4TCEN:
-			/* RSS enabled cases */
-			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-			      IXGBE_MRQC_RTRSS8TCEN;
-			break;
-		default:
-			/*
-			 * Unsupported value; assume stale data and
-			 * overwrite with the no-RSS setting
-			 */
-			ASSERT(0);
-			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-			      IXGBE_MRQC_RT8TCEN;
-		}
-	}
-	if (dcb_config->num_tcs.pg_tcs == 4) {
-		/* We support both VT-on and VT-off with 4 TCs. */
-		if (dcb_config->vt_mode)
-			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-			      IXGBE_MRQC_VMDQRT4TCEN;
-		else
-			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-			      IXGBE_MRQC_RTRSS4TCEN;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
-
-	/* Enable DCB for Tx with 8 TCs */
-	if (dcb_config->num_tcs.pg_tcs == 8)
-		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-	else {
-		/* We support both VT-on and VT-off with 4 TCs. */
-		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
-		if (dcb_config->vt_mode)
-			reg |= IXGBE_MTQC_VT_ENA;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
-	/* Disable drop for all queues */
-	for (q = 0; q < 128; q++)
-		IXGBE_WRITE_REG(hw, IXGBE_QDE,
-				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
-
-	/* Enable the Tx desc arbiter */
-	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-	reg &= ~IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-	/* Enable Security TX Buffer IFG for DCB */
-	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-	reg |= IXGBE_SECTX_DCB;
-	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-
-	return IXGBE_SUCCESS;
-}
-
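The MRQC/MTQC programming above reduces to a small decision on the number of
traffic classes and whether VT (virtualization) mode is on. A compact
standalone sketch of that mapping (illustrative only, not part of the patch;
the strings are labels, not register values):

#include <stdio.h>

/* Describes the Rx/Tx queueing mode picked by ixgbe_dcb_config_82599(). */
static const char *dcb_mode(int num_tcs, int vt_mode)
{
	if (num_tcs == 8)
		return "8 TCs: MRQC RT8TCEN/RTRSS8TCEN, MTQC RT_ENA|8TC_8TQ";
	if (num_tcs == 4 && vt_mode)
		return "4 TCs, VT on: MRQC VMDQRT4TCEN, MTQC RT_ENA|4TC_4TQ|VT_ENA";
	if (num_tcs == 4)
		return "4 TCs, VT off: MRQC RTRSS4TCEN, MTQC RT_ENA|4TC_4TQ";
	return "unsupported TC count";
}

int main(void)
{
	printf("%s\n", dcb_mode(8, 0));
	printf("%s\n", dcb_mode(4, 1));
	printf("%s\n", dcb_mode(4, 0));
	return 0;
}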
-/**
- * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure dcb settings and enable dcb mode.
- */
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
-			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
-			      u8 *map)
-{
-	UNREFERENCED_1PARAMETER(link_speed);
-
-	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
-					  map);
-	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
-					       tsa);
-	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
-					       tsa, map);
-
-	return IXGBE_SUCCESS;
-}
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
deleted file mode 100644
index 2e18350..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_dcb_82599.h
+++ /dev/null
@@ -1,153 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_DCB_82599_H_
-#define _IXGBE_DCB_82599_H_
-
-/* DCB register definitions */
-#define IXGBE_RTTDCS_TDPAC	0x00000001 /* 0 Round Robin,
-					    * 1 WSP - Weighted Strict Priority
-					    */
-#define IXGBE_RTTDCS_VMPAC	0x00000002 /* 0 Round Robin,
-					    * 1 WRR - Weighted Round Robin
-					    */
-#define IXGBE_RTTDCS_TDRM	0x00000010 /* Transmit Recycle Mode */
-#define IXGBE_RTTDCS_BDPM	0x00400000 /* Bypass Data Pipe - must clear! */
-#define IXGBE_RTTDCS_BPBFSM	0x00800000 /* Bypass PB Free Space - must
-					     * clear!
-					     */
-#define IXGBE_RTTDCS_SPEED_CHG	0x80000000 /* Link speed change */
-
-/* Receive UP2TC mapping */
-#define IXGBE_RTRUP2TC_UP_SHIFT	3
-#define IXGBE_RTRUP2TC_UP_MASK	7
-/* Transmit UP2TC mapping */
-#define IXGBE_RTTUP2TC_UP_SHIFT	3
-
-#define IXGBE_RTRPT4C_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
-#define IXGBE_RTRPT4C_BWG_SHIFT	9  /* Offset to BWG index */
-#define IXGBE_RTRPT4C_GSP	0x40000000 /* GSP enable bit */
-#define IXGBE_RTRPT4C_LSP	0x80000000 /* LSP enable bit */
-
-#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
-					    * buffers enable
-					    */
-#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
-					    * (RSS) enable
-					    */
-
-/* RTRPCS Bit Masks */
-#define IXGBE_RTRPCS_RRM	0x00000002 /* Receive Recycle Mode enable */
-/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RTRPCS_RAC	0x00000004
-#define IXGBE_RTRPCS_ARBDIS	0x00000040 /* Arbitration disable bit */
-
-/* RTTDT2C Bit Masks */
-#define IXGBE_RTTDT2C_MCL_SHIFT	12
-#define IXGBE_RTTDT2C_BWG_SHIFT	9
-#define IXGBE_RTTDT2C_GSP	0x40000000
-#define IXGBE_RTTDT2C_LSP	0x80000000
-
-#define IXGBE_RTTPT2C_MCL_SHIFT	12
-#define IXGBE_RTTPT2C_BWG_SHIFT	9
-#define IXGBE_RTTPT2C_GSP	0x40000000
-#define IXGBE_RTTPT2C_LSP	0x80000000
-
-/* RTTPCS Bit Masks */
-#define IXGBE_RTTPCS_TPPAC	0x00000020 /* 0 Round Robin,
-					    * 1 SP - Strict Priority
-					    */
-#define IXGBE_RTTPCS_ARBDIS	0x00000040 /* Arbiter disable */
-#define IXGBE_RTTPCS_TPRM	0x00000100 /* Transmit Recycle Mode enable */
-#define IXGBE_RTTPCS_ARBD_SHIFT	22
-#define IXGBE_RTTPCS_ARBD_DCB	0x4 /* Arbitration delay in DCB mode */
-
-#define IXGBE_TXPBTHRESH_DCB	0xA /* THRESH value for DCB mode */
-
-/* SECTXMINIFG DCB */
-#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer SEC IFG */
-
-/* BCN register definitions */
-#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
-#define IXGBE_RTTBCNRC_RS_ENA		0x80000000
-
-#define IXGBE_RTTBCNCR_MNG_CMTGI	0x00000001
-#define IXGBE_RTTBCNCR_MGN_BCNA_MODE	0x00000002
-#define IXGBE_RTTBCNCR_RSV7_11_SHIFT	5
-#define IXGBE_RTTBCNCR_G		0x00000400
-#define IXGBE_RTTBCNCR_I		0x00000800
-#define IXGBE_RTTBCNCR_H		0x00001000
-#define IXGBE_RTTBCNCR_VER_SHIFT	14
-#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT	16
-
-#define IXGBE_RTTBCNACL_SMAC_L_SHIFT	16
-
-#define IXGBE_RTTBCNTG_BCNA_MODE	0x80000000
-
-#define IXGBE_RTTBCNRTT_TS_SHIFT	3
-#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT	16
-
-#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL	0x00000002
-#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT	2
-#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT	16
-#define IXGBE_RTTBCNRD_DRIFT_ENA	0x80000000
-
-
-/* DCB driver APIs */
-
-/* DCB PFC */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
-
-/* DCB stats */
-s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
-				    struct ixgbe_dcb_config *);
-s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
-				 struct ixgbe_hw_stats *, u8);
-s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
-				  struct ixgbe_hw_stats *, u8);
-
-/* DCB config arbiters */
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
-					   u8 *, u8 *);
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
-					   u8 *, u8 *, u8 *);
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
-				      u8 *, u8 *);
-
-/* DCB initialization */
-s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
-			   struct ixgbe_dcb_config *);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
-			      u8 *, u8 *);
-#endif /* _IXGBE_DCB_82599_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
deleted file mode 100644
index c00c2f7..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
+++ /dev/null
@@ -1,789 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_type.h"
-#include "ixgbe_mbx.h"
-
-/**
- *  ixgbe_read_mbx - Reads a message from the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to read
- *
- *  returns SUCCESS if it successfully read message from buffer
- **/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_read_mbx");
-
-	/* limit read to size of mailbox */
-	if (size > mbx->size)
-		size = mbx->size;
-
-	if (mbx->ops.read)
-		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_write_mbx - Write a message to the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_write_mbx");
-
-	if (size > mbx->size) {
-		ret_val = IXGBE_ERR_MBX;
-		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
-			     "Invalid mailbox message size %d", size);
-	} else if (mbx->ops.write)
-		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_msg - checks to see if someone sent us mail
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_check_for_msg");
-
-	if (mbx->ops.check_for_msg)
-		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_ack - checks to see if someone sent us ACK
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_check_for_ack");
-
-	if (mbx->ops.check_for_ack)
-		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_rst - checks to see if other side has reset
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the Status bit was found or else ERR_MBX
- **/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_check_for_rst");
-
-	if (mbx->ops.check_for_rst)
-		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_poll_for_msg - Wait for message notification
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message notification
- **/
-STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	int countdown = mbx->timeout;
-
-	DEBUGFUNC("ixgbe_poll_for_msg");
-
-	if (!countdown || !mbx->ops.check_for_msg)
-		goto out;
-
-	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
-		countdown--;
-		if (!countdown)
-			break;
-		usec_delay(mbx->usec_delay);
-	}
-
-	if (countdown == 0)
-		ERROR_REPORT2(IXGBE_ERROR_POLLING,
-			   "Polling for VF%d mailbox message timed out", mbx_id);
-
-out:
-	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
-}
-
-/**
- *  ixgbe_poll_for_ack - Wait for message acknowledgement
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message acknowledgement
- **/
-STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	int countdown = mbx->timeout;
-
-	DEBUGFUNC("ixgbe_poll_for_ack");
-
-	if (!countdown || !mbx->ops.check_for_ack)
-		goto out;
-
-	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
-		countdown--;
-		if (!countdown)
-			break;
-		usec_delay(mbx->usec_delay);
-	}
-
-	if (countdown == 0)
-		ERROR_REPORT2(IXGBE_ERROR_POLLING,
-			     "Polling for VF%d mailbox ack timed out", mbx_id);
-
-out:
-	return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
-}
-
-/**
- *  ixgbe_read_posted_mbx - Wait for message notification and receive message
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully received a message notification and
- *  copied it into the receive buffer.
- **/
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_read_posted_mbx");
-
-	if (!mbx->ops.read)
-		goto out;
-
-	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
-
-	/* if ack received read message, otherwise we timed out */
-	if (!ret_val)
-		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer and
- *  received an ack to that message within delay * timeout period
- **/
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
-			   u16 mbx_id)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_write_posted_mbx");
-
-	/* exit if either we can't write or there isn't a defined timeout */
-	if (!mbx->ops.write || !mbx->timeout)
-		goto out;
-
-	/* send msg */
-	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
-	/* if msg sent wait until we receive an ack */
-	if (!ret_val)
-		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
-out:
-	return ret_val;
-}
-
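Both ixgbe_poll_for_msg() and ixgbe_poll_for_ack() above share the same
countdown-and-delay pattern around a check callback. The snippet below is a
self-contained sketch of that pattern (illustrative only, not part of the
patch); check_ready() is a stand-in for mbx->ops.check_for_ack(), and the
number of polls before "success" is invented for the example.

#include <stdio.h>

/* Stand-in for mbx->ops.check_for_ack(): returns 0 once the ack arrives. */
static int check_ready(int attempt)
{
	return attempt < 5 ? -1 : 0;	/* "ack" arrives on the sixth poll */
}

int main(void)
{
	int countdown = 2000;	/* plays the role of mbx->timeout */
	int attempt = 0;

	while (countdown && check_ready(attempt)) {
		countdown--;
		attempt++;
		/* the real code calls usec_delay(mbx->usec_delay) here */
	}

	printf("%s after %d polls\n",
	       countdown ? "acked" : "timed out", attempt);
	return 0;
}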
-/**
- *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
- *  @hw: pointer to the HW structure
- *
- *  Sets up the mailbox read and write message function pointers
- **/
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-
-	mbx->ops.read_posted = ixgbe_read_posted_mbx;
-	mbx->ops.write_posted = ixgbe_write_posted_mbx;
-}
-
-/**
- *  ixgbe_read_v2p_mailbox - read v2p mailbox
- *  @hw: pointer to the HW structure
- *
- *  This function is used to read the v2p mailbox without losing the read to
- *  clear status bits.
- **/
-STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
-{
-	u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
-	v2p_mailbox |= hw->mbx.v2p_mailbox;
-	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
-
-	return v2p_mailbox;
-}
-
-/**
- *  ixgbe_check_for_bit_vf - Determine if a status bit was set
- *  @hw: pointer to the HW structure
- *  @mask: bitmask for bits to be tested and cleared
- *
- *  This function is used to check for the read to clear bits within
- *  the V2P mailbox.
- **/
-STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
-{
-	u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	if (v2p_mailbox & mask)
-		ret_val = IXGBE_SUCCESS;
-
-	hw->mbx.v2p_mailbox &= ~mask;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
- **/
-STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	UNREFERENCED_1PARAMETER(mbx_id);
-	DEBUGFUNC("ixgbe_check_for_msg_vf");
-
-	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
-		ret_val = IXGBE_SUCCESS;
-		hw->mbx.stats.reqs++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
- **/
-STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	UNREFERENCED_1PARAMETER(mbx_id);
-	DEBUGFUNC("ixgbe_check_for_ack_vf");
-
-	if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
-		ret_val = IXGBE_SUCCESS;
-		hw->mbx.stats.acks++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_rst_vf - checks to see if the PF has reset
- *  @hw: pointer to the HW structure
- *  @mbx_id: id of mailbox to check
- *
- *  returns true if the PF has set the reset done bit or else false
- **/
-STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	UNREFERENCED_1PARAMETER(mbx_id);
-	DEBUGFUNC("ixgbe_check_for_rst_vf");
-
-	if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
-	    IXGBE_VFMAILBOX_RSTI))) {
-		ret_val = IXGBE_SUCCESS;
-		hw->mbx.stats.rsts++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
- *  @hw: pointer to the HW structure
- *
- *  return SUCCESS if we obtained the mailbox lock
- **/
-STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
-
-	/* Take ownership of the buffer */
-	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
-
-	/* reserve mailbox for vf use */
-	if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
-		ret_val = IXGBE_SUCCESS;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_write_mbx_vf - Write a message to the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to write
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-			      u16 mbx_id)
-{
-	s32 ret_val;
-	u16 i;
-
-	UNREFERENCED_1PARAMETER(mbx_id);
-
-	DEBUGFUNC("ixgbe_write_mbx_vf");
-
-	/* lock the mailbox to prevent pf/vf race condition */
-	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
-	if (ret_val)
-		goto out_no_write;
-
-	/* flush msg and acks as we are overwriting the message buffer */
-	ixgbe_check_for_msg_vf(hw, 0);
-	ixgbe_check_for_ack_vf(hw, 0);
-
-	/* copy the caller specified message to the mailbox memory buffer */
-	for (i = 0; i < size; i++)
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
-
-	/*
-	 * Complete the remaining mailbox data registers with zero to reset
-	 * the data sent in a previous exchange (in either side) with the PF,
-	 * including exchanges performed by another Guest OS to which that VF
-	 * was previously assigned.
-	 */
-	while (i < hw->mbx.size) {
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
-		i++;
-	}
-
-	/* update stats */
-	hw->mbx.stats.msgs_tx++;
-
-	/* Drop VFU and interrupt the PF to tell it a message has been sent */
-	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
-
-out_no_write:
-	return ret_val;
-}
-
-/**
- *  ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @mbx_id: id of mailbox to read
- *
- *  returns SUCCESS if it successfully read message from buffer
- **/
-STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-			     u16 mbx_id)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_read_mbx_vf");
-	UNREFERENCED_1PARAMETER(mbx_id);
-
-	/* lock the mailbox to prevent pf/vf race condition */
-	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
-	if (ret_val)
-		goto out_no_read;
-
-	/* copy the message from the mailbox memory buffer */
-	for (i = 0; i < size; i++)
-		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
-
-	/* Acknowledge receipt and release mailbox, then we're done */
-	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
-
-	/* update stats */
-	hw->mbx.stats.msgs_rx++;
-
-out_no_read:
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_mbx_params_vf - set initial values for vf mailbox
- *  @hw: pointer to the HW structure
- *
- *  Initializes the hw->mbx struct to correct values for vf mailbox
- */
-void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-
-	/* start mailbox as timed out and let the reset_hw call set the timeout
-	 * value to begin communications */
-	mbx->timeout = 0;
-	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
-
-	mbx->size = IXGBE_VFMAILBOX_SIZE;
-
-	mbx->ops.read = ixgbe_read_mbx_vf;
-	mbx->ops.write = ixgbe_write_mbx_vf;
-	mbx->ops.read_posted = ixgbe_read_posted_mbx;
-	mbx->ops.write_posted = ixgbe_write_posted_mbx;
-	mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
-	mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
-	mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
-
-	mbx->stats.msgs_tx = 0;
-	mbx->stats.msgs_rx = 0;
-	mbx->stats.reqs = 0;
-	mbx->stats.acks = 0;
-	mbx->stats.rsts = 0;
-}
-
-STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
-{
-	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	if (mbvficr & mask) {
-		ret_val = IXGBE_SUCCESS;
-		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
-	u32 vf_bit = vf_number % 16;
-
-	DEBUGFUNC("ixgbe_check_for_msg_pf");
-
-	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
-				    index)) {
-		ret_val = IXGBE_SUCCESS;
-		hw->mbx.stats.reqs++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
-	u32 vf_bit = vf_number % 16;
-
-	DEBUGFUNC("ixgbe_check_for_ack_pf");
-
-	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
-				    index)) {
-		ret_val = IXGBE_SUCCESS;
-		hw->mbx.stats.acks++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
- **/
-STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
-{
-	u32 reg_offset = (vf_number < 32) ? 0 : 1;
-	u32 vf_shift = vf_number % 32;
-	u32 vflre = 0;
-	s32 ret_val = IXGBE_ERR_MBX;
-
-	DEBUGFUNC("ixgbe_check_for_rst_pf");
-
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
-		break;
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-	case ixgbe_mac_X540:
-		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
-		break;
-	default:
-		break;
-	}
-
-	if (vflre & (1 << vf_shift)) {
-		ret_val = IXGBE_SUCCESS;
-		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
-		hw->mbx.stats.rsts++;
-	}
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
- *  @hw: pointer to the HW structure
- *  @vf_number: the VF index
- *
- *  return SUCCESS if we obtained the mailbox lock
- **/
-STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
-{
-	s32 ret_val = IXGBE_ERR_MBX;
-	u32 p2v_mailbox;
-
-	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
-
-	/* Take ownership of the buffer */
-	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
-
-	/* reserve mailbox for vf use */
-	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
-	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
-		ret_val = IXGBE_SUCCESS;
-	else
-		ERROR_REPORT2(IXGBE_ERROR_POLLING,
-			   "Failed to obtain mailbox lock for VF%d", vf_number);
-
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_write_mbx_pf - Places a message in the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @vf_number: the VF index
- *
- *  returns SUCCESS if it successfully copied message into the buffer
- **/
-STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-			      u16 vf_number)
-{
-	s32 ret_val;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_write_mbx_pf");
-
-	/* lock the mailbox to prevent pf/vf race condition */
-	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
-	if (ret_val)
-		goto out_no_write;
-
-	/* flush msg and acks as we are overwriting the message buffer */
-	ixgbe_check_for_msg_pf(hw, vf_number);
-	ixgbe_check_for_ack_pf(hw, vf_number);
-
-	/* copy the caller specified message to the mailbox memory buffer */
-	for (i = 0; i < size; i++)
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
-
-	/*
-	 * Complete the remaining mailbox data registers with zero to reset
-	 * the data sent in a previous exchange (in either side) with the VF,
-	 * including exchanges performed by another Guest OS to which that VF
-	 * was previously assigned.
-	 */
-	while (i < hw->mbx.size) {
-		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
-		i++;
-	}
-
-	/* Interrupt VF to tell it a message has been sent and release buffer */
-	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
-
-	/* update stats */
-	hw->mbx.stats.msgs_tx++;
-
-out_no_write:
-	return ret_val;
-
-}
-
-/**
- *  ixgbe_read_mbx_pf - Read a message from the mailbox
- *  @hw: pointer to the HW structure
- *  @msg: The message buffer
- *  @size: Length of buffer
- *  @vf_number: the VF index
- *
- *  This function copies a message from the mailbox buffer to the caller's
- *  memory buffer.  The presumption is that the caller knows that there was
- *  a message due to a VF request so no polling for message is needed.
- **/
-STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
-			     u16 vf_number)
-{
-	s32 ret_val;
-	u16 i;
-
-	DEBUGFUNC("ixgbe_read_mbx_pf");
-
-	/* lock the mailbox to prevent pf/vf race condition */
-	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
-	if (ret_val)
-		goto out_no_read;
-
-	/* copy the message to the mailbox memory buffer */
-	for (i = 0; i < size; i++)
-		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
-
-	/* Acknowledge the message and release buffer */
-	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
-
-	/* update stats */
-	hw->mbx.stats.msgs_rx++;
-
-out_no_read:
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
- *  @hw: pointer to the HW structure
- *
- *  Initializes the hw->mbx struct to correct values for pf mailbox
- */
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-
-	if (hw->mac.type != ixgbe_mac_82599EB &&
-	    hw->mac.type != ixgbe_mac_X550 &&
-	    hw->mac.type != ixgbe_mac_X550EM_x &&
-	    hw->mac.type != ixgbe_mac_X540)
-		return;
-
-	mbx->timeout = 0;
-	mbx->usec_delay = 0;
-
-	mbx->size = IXGBE_VFMAILBOX_SIZE;
-
-	mbx->ops.read = ixgbe_read_mbx_pf;
-	mbx->ops.write = ixgbe_write_mbx_pf;
-	mbx->ops.read_posted = ixgbe_read_posted_mbx;
-	mbx->ops.write_posted = ixgbe_write_posted_mbx;
-	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
-	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
-	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
-
-	mbx->stats.msgs_tx = 0;
-	mbx->stats.msgs_rx = 0;
-	mbx->stats.reqs = 0;
-	mbx->stats.acks = 0;
-	mbx->stats.rsts = 0;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
deleted file mode 100644
index 4594572..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
+++ /dev/null
@@ -1,150 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_MBX_H_
-#define _IXGBE_MBX_H_
-
-#include "ixgbe_type.h"
-
-#define IXGBE_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX		-100
-
-#define IXGBE_VFMAILBOX		0x002FC
-#define IXGBE_VFMBMEM		0x00200
-
-/* Define mailbox register bits */
-#define IXGBE_VFMAILBOX_REQ	0x00000001 /* Request for PF Ready bit */
-#define IXGBE_VFMAILBOX_ACK	0x00000002 /* Ack PF message received */
-#define IXGBE_VFMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_VFMAILBOX_PFSTS	0x00000010 /* PF wrote a message in the MB */
-#define IXGBE_VFMAILBOX_PFACK	0x00000020 /* PF ack the previous VF msg */
-#define IXGBE_VFMAILBOX_RSTI	0x00000040 /* PF has reset indication */
-#define IXGBE_VFMAILBOX_RSTD	0x00000080 /* PF has indicated reset done */
-#define IXGBE_VFMAILBOX_R2C_BITS	0x000000B0 /* All read to clear bits */
-
-#define IXGBE_PFMAILBOX_STS	0x00000001 /* Initiate message send to VF */
-#define IXGBE_PFMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
-#define IXGBE_PFMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
-#define IXGBE_PFMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */
-
-#define IXGBE_MBVFICR_VFREQ_MASK	0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK	0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */
-
-
-/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
- * PF.  The reverse is true if it is IXGBE_PF_*.
- * Message ACKs are the value or'd with 0xF0000000
- */
-#define IXGBE_VT_MSGTYPE_ACK	0x80000000 /* Messages below or'd with
-					    * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK	0x40000000 /* Messages below or'd with
-					    * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS	0x20000000 /* Indicates that VF is still
-					    * clear to send requests */
-#define IXGBE_VT_MSGINFO_SHIFT	16
-/* bits 23:16 are used for extra info for certain messages */
-#define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
-
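To see how these bit-field definitions combine, here is a standalone sketch
(illustrative only, not part of the patch) that builds a VF request word with
its extra info in bits 23:16 and checks the ACK bit on a reply; the local
defines mirror the IXGBE_* values in this header, and the chosen message and
index are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define VT_MSGTYPE_ACK		0x80000000u	/* mirrors IXGBE_VT_MSGTYPE_ACK */
#define VT_MSGINFO_SHIFT	16		/* mirrors IXGBE_VT_MSGINFO_SHIFT */
#define VT_MSGINFO_MASK		(0xFFu << VT_MSGINFO_SHIFT)
#define VF_SET_MACVLAN		0x06u		/* mirrors IXGBE_VF_SET_MACVLAN */

int main(void)
{
	uint32_t index = 2;	/* illustrative filter index in bits 23:16 */
	uint32_t req = VF_SET_MACVLAN | (index << VT_MSGINFO_SHIFT);
	uint32_t reply = req | VT_MSGTYPE_ACK;	/* PF acks by or'ing the bit */

	printf("request 0x%08x, info %u, acked: %s\n",
	       (unsigned)req,
	       (unsigned)((req & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT),
	       (reply & VT_MSGTYPE_ACK) ? "yes" : "no");
	return 0;
}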
-/* definitions to support mailbox API version negotiation */
-
-/*
- * each element denotes a version of the API; existing numbers may not
- * change; any additions must go at the end
- */
-enum ixgbe_pfvf_api_rev {
-	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
-	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
-	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
-	/* This value should always be last */
-	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
-};
-
-/* mailbox API, legacy requests */
-#define IXGBE_VF_RESET		0x01 /* VF requests reset */
-#define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
-#define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
-#define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
-
-/* mailbox API, version 1.0 VF requests */
-#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
-#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
-
-/* mailbox API, version 1.1 VF requests */
-#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
-
-/* GET_QUEUES return data indices within the mailbox */
-#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
-#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
-#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
-#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
-
-/* length of permanent address message returned from PF */
-#define IXGBE_VF_PERMADDR_MSG_LEN	4
-/* word in permanent address message with the current multicast type */
-#define IXGBE_VF_MC_TYPE_WORD		3
-
-#define IXGBE_PF_CONTROL_MSG		0x0100 /* PF control message */
-
-/* mailbox API, version 2.0 VF requests */
-#define IXGBE_VF_API_NEGOTIATE		0x08 /* negotiate API version */
-#define IXGBE_VF_GET_QUEUES		0x09 /* get queue configuration */
-#define IXGBE_VF_ENABLE_MACADDR		0x0A /* enable MAC address */
-#define IXGBE_VF_DISABLE_MACADDR	0x0B /* disable MAC address */
-#define IXGBE_VF_GET_MACADDRS		0x0C /* get all configured MAC addrs */
-#define IXGBE_VF_SET_MCAST_PROMISC	0x0D /* enable multicast promiscuous */
-#define IXGBE_VF_GET_MTU		0x0E /* get bounds on MTU */
-#define IXGBE_VF_SET_MTU		0x0F /* set a specific MTU */
-
-/* mailbox API, version 2.0 PF requests */
-#define IXGBE_PF_TRANSPARENT_VLAN	0x0101 /* enable transparent vlan */
-
-#define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
-#define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */
-
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
-void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
-
-#endif /* _IXGBE_MBX_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
deleted file mode 100644
index 4fb8dd7..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
+++ /dev/null
@@ -1,155 +0,0 @@ 
-/******************************************************************************
-
-  Copyright (c) 2001-2014, Intel Corporation 
-  All rights reserved.
-  
-  Redistribution and use in source and binary forms, with or without 
-  modification, are permitted provided that the following conditions are met:
-  
-   1. Redistributions of source code must retain the above copyright notice, 
-      this list of conditions and the following disclaimer.
-  
-   2. Redistributions in binary form must reproduce the above copyright 
-      notice, this list of conditions and the following disclaimer in the 
-      documentation and/or other materials provided with the distribution.
-  
-   3. Neither the name of the Intel Corporation nor the names of its 
-      contributors may be used to endorse or promote products derived from 
-      this software without specific prior written permission.
-  
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
-  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
-  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
-  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
-  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
-  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
-  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-  POSSIBILITY OF SUCH DAMAGE.
-
-******************************************************************************/
-/*$FreeBSD$*/
-
-#ifndef _IXGBE_OS_H_
-#define _IXGBE_OS_H_
-
-#include <string.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <rte_common.h>
-#include <rte_debug.h>
-#include <rte_cycles.h>
-#include <rte_log.h>
-#include <rte_byteorder.h>
-
-#include "../ixgbe_logs.h"
-#include "../ixgbe_bypass_defines.h"
-
-#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
-
-#define DELAY(x) rte_delay_us(x)
-#define usec_delay(x) DELAY(x)
-#define msec_delay(x) DELAY(1000*(x))
-
-#define DEBUGFUNC(F)            DEBUGOUT(F "\n");
-#define DEBUGOUT(S, args...)    PMD_DRV_LOG_RAW(DEBUG, S, ##args)
-#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
-#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
-
-#define ERROR_REPORT1(e, S, args...)   DEBUGOUT(S, ##args)
-#define ERROR_REPORT2(e, S, args...)   DEBUGOUT(S, ##args)
-#define ERROR_REPORT3(e, S, args...)   DEBUGOUT(S, ##args)
-
-#define FALSE               0
-#define TRUE                1
-
-#define false               0
-#define true                1
-#define min(a,b)	RTE_MIN(a,b) 
-
-#define EWARN(hw, S, args...)     DEBUGOUT1(S, ##args)
-
-/* Bunch of defines for shared code bogosity */
-#define UNREFERENCED_PARAMETER(_p)  
-#define UNREFERENCED_1PARAMETER(_p) 
-#define UNREFERENCED_2PARAMETER(_p, _q)
-#define UNREFERENCED_3PARAMETER(_p, _q, _r) 
-#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) 
-
-/* Shared code error reporting */
-enum {
-	IXGBE_ERROR_SOFTWARE,
-	IXGBE_ERROR_POLLING,
-	IXGBE_ERROR_INVALID_STATE,
-	IXGBE_ERROR_UNSUPPORTED,
-	IXGBE_ERROR_ARGUMENT,
-	IXGBE_ERROR_CAUTION,
-};
-
-#define STATIC static
-#define IXGBE_NTOHL(_i)	rte_be_to_cpu_32(_i)
-#define IXGBE_NTOHS(_i)	rte_be_to_cpu_16(_i)
-#define IXGBE_CPU_TO_LE32(_i)  rte_cpu_to_le_32(_i)
-#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
-#define IXGBE_CPU_TO_BE16(_i)  rte_cpu_to_be_16(_i)
-#define IXGBE_CPU_TO_BE32(_i)  rte_cpu_to_be_32(_i)
-
-typedef uint8_t		u8;
-typedef int8_t		s8;
-typedef uint16_t	u16;
-typedef int16_t		s16;
-typedef uint32_t	u32;
-typedef int32_t		s32;
-typedef uint64_t	u64;
-typedef int		bool;
-
-#define mb()	rte_mb()
-#define wmb()	rte_wmb()
-#define rmb()	rte_rmb()
-
-#define IOMEM
-
-#define prefetch(x) rte_prefetch0(x)
-
-#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
-
-static inline uint32_t ixgbe_read_addr(volatile void* addr)
-{
-	return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
-}
-
-#define IXGBE_PCI_REG_WRITE(reg, value) do { \
-	IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while(0)
-
-#define IXGBE_PCI_REG_ADDR(hw, reg) \
-	((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
-
-#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
-	IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
-
-/* Not implemented !! */
-#define IXGBE_READ_PCIE_WORD(hw, reg) 0	
-#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
-
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-
-#define IXGBE_READ_REG(hw, reg) \
-	ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
-
-#define IXGBE_WRITE_REG(hw, reg, value) \
-	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
-
-#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
-	IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
-
-#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
-	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
-
-#endif /* _IXGBE_OS_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
deleted file mode 100644
index 4a3463a..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
+++ /dev/null
@@ -1,2583 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw);
-STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
-STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
-STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
-STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
-					  u8 *sff8472_data);
-
-/**
- * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
- * @hw: pointer to the hardware structure
- * @byte: byte to send
- *
- * Returns an error code on error.
- */
-STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
-{
-	s32 status;
-
-	status = ixgbe_clock_out_i2c_byte(hw, byte);
-	if (status)
-		return status;
-	return ixgbe_get_i2c_ack(hw);
-}
-
-/**
- * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
- * @hw: pointer to the hardware structure
- * @byte: pointer to a u8 to receive the byte
- *
- * Returns an error code on error.
- */
-STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
-{
-	s32 status;
-
-	status = ixgbe_clock_in_i2c_byte(hw, byte);
-	if (status)
-		return status;
-	/* ACK */
-	return ixgbe_clock_out_i2c_bit(hw, false);
-}
-
-/**
- * ixgbe_ones_comp_byte_add - Perform one's complement addition
- * @add1: addend 1
- * @add2: addend 2
- *
- * Returns one's complement 8-bit sum.
- */
-STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
-{
-	u16 sum = add1 + add2;
-
-	sum = (sum & 0xFF) + (sum >> 8);
-	return sum & 0xFF;
-}
-
-/**
- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to read from
- * @reg: I2C device register to read from
- * @val: pointer to location to receive read value
- *
- * Returns an error code on error.
- */
-STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
-					   u16 reg, u16 *val)
-{
-	u32 swfw_mask = hw->phy.phy_semaphore_mask;
-	int max_retry = 10;
-	int retry = 0;
-	u8 csum_byte;
-	u8 high_bits;
-	u8 low_bits;
-	u8 reg_high;
-	u8 csum;
-
-	reg_high = ((reg >> 7) & 0xFE) | 1;	/* Indicate read combined */
-	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
-	csum = ~csum;
-	do {
-		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
-			return IXGBE_ERR_SWFW_SYNC;
-		ixgbe_i2c_start(hw);
-		/* Device Address and write indication */
-		if (ixgbe_out_i2c_byte_ack(hw, addr))
-			goto fail;
-		/* Write bits 14:8 */
-		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
-			goto fail;
-		/* Write bits 7:0 */
-		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
-			goto fail;
-		/* Write csum */
-		if (ixgbe_out_i2c_byte_ack(hw, csum))
-			goto fail;
-		/* Re-start condition */
-		ixgbe_i2c_start(hw);
-		/* Device Address and read indication */
-		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
-			goto fail;
-		/* Get upper bits */
-		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
-			goto fail;
-		/* Get low bits */
-		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
-			goto fail;
-		/* Get csum */
-		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
-			goto fail;
-		/* NACK */
-		if (ixgbe_clock_out_i2c_bit(hw, false))
-			goto fail;
-		ixgbe_i2c_stop(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		*val = (high_bits << 8) | low_bits;
-		return 0;
-
-fail:
-		ixgbe_i2c_bus_clear(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		retry++;
-		if (retry < max_retry)
-			DEBUGOUT("I2C byte read combined error - Retrying.\n");
-		else
-			DEBUGOUT("I2C byte read combined error.\n");
-	} while (retry < max_retry);
-
-	return IXGBE_ERR_I2C;
-}
-
-/**
- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
- * @hw: pointer to the hardware structure
- * @addr: I2C bus address to write to
- * @reg: I2C device register to write to
- * @val: value to write
- *
- * Returns an error code on error.
- */
-STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
-					    u8 addr, u16 reg, u16 val)
-{
-	int max_retry = 1;
-	int retry = 0;
-	u8 reg_high;
-	u8 csum;
-
-	reg_high = (reg >> 7) & 0xFE;	/* Indicate write combined */
-	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
-	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
-	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
-	csum = ~csum;
-	do {
-		ixgbe_i2c_start(hw);
-		/* Device Address and write indication */
-		if (ixgbe_out_i2c_byte_ack(hw, addr))
-			goto fail;
-		/* Write bits 14:8 */
-		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
-			goto fail;
-		/* Write bits 7:0 */
-		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
-			goto fail;
-		/* Write data 15:8 */
-		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
-			goto fail;
-		/* Write data 7:0 */
-		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
-			goto fail;
-		/* Write csum */
-		if (ixgbe_out_i2c_byte_ack(hw, csum))
-			goto fail;
-		ixgbe_i2c_stop(hw);
-		return 0;
-
-fail:
-		ixgbe_i2c_bus_clear(hw);
-		retry++;
-		if (retry < max_retry)
-			DEBUGOUT("I2C byte write combined error - Retrying.\n");
-		else
-			DEBUGOUT("I2C byte write combined error.\n");
-	} while (retry < max_retry);
-
-	return IXGBE_ERR_I2C;
-}
-
-/**
- *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
- *  @hw: pointer to the hardware structure
- *
- *  Initialize the function pointers.
- **/
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
-{
-	struct ixgbe_phy_info *phy = &hw->phy;
-
-	DEBUGFUNC("ixgbe_init_phy_ops_generic");
-
-	/* PHY */
-	phy->ops.identify = ixgbe_identify_phy_generic;
-	phy->ops.reset = ixgbe_reset_phy_generic;
-	phy->ops.read_reg = ixgbe_read_phy_reg_generic;
-	phy->ops.write_reg = ixgbe_write_phy_reg_generic;
-	phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi;
-	phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi;
-	phy->ops.setup_link = ixgbe_setup_phy_link_generic;
-	phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic;
-	phy->ops.check_link = NULL;
-	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
-	phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic;
-	phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic;
-	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic;
-	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic;
-	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic;
-	phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear;
-	phy->ops.identify_sfp = ixgbe_identify_module_generic;
-	phy->sfp_type = ixgbe_sfp_type_unknown;
-	phy->ops.read_i2c_combined = ixgbe_read_i2c_combined_generic;
-	phy->ops.write_i2c_combined = ixgbe_write_i2c_combined_generic;
-	phy->ops.check_overtemp = ixgbe_tn_check_overtemp;
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_identify_phy_generic - Get physical layer module
- *  @hw: pointer to hardware structure
- *
- *  Determines the physical layer module found on the current adapter.
- **/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-	u32 phy_addr;
-	u16 ext_ability = 0;
-
-	DEBUGFUNC("ixgbe_identify_phy_generic");
-
-	if (!hw->phy.phy_semaphore_mask) {
-		if (hw->bus.lan_id)
-			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
-		else
-			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
-	}
-
-	if (hw->phy.type == ixgbe_phy_unknown) {
-		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
-				hw->phy.addr = phy_addr;
-				ixgbe_get_phy_id(hw);
-				hw->phy.type =
-					ixgbe_get_phy_type_from_id(hw->phy.id);
-
-				if (hw->phy.type == ixgbe_phy_unknown) {
-					hw->phy.ops.read_reg(hw,
-						  IXGBE_MDIO_PHY_EXT_ABILITY,
-						  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-						  &ext_ability);
-					if (ext_ability &
-					    (IXGBE_MDIO_PHY_10GBASET_ABILITY |
-					     IXGBE_MDIO_PHY_1000BASET_ABILITY))
-						hw->phy.type =
-							 ixgbe_phy_cu_unknown;
-					else
-						hw->phy.type =
-							 ixgbe_phy_generic;
-				}
-
-				status = IXGBE_SUCCESS;
-				break;
-			}
-		}
-
-		/* Certain media types do not have a phy so an address will not
-		 * be found and the code will take this path.  Caller has to
-		 * decide if it is an error or not.
-		 */
-		if (status != IXGBE_SUCCESS) {
-			hw->phy.addr = 0;
-		}
-	} else {
-		status = IXGBE_SUCCESS;
-	}
-
-	return status;
-}
-
-/**
- * ixgbe_check_reset_blocked - check status of MNG FW veto bit
- * @hw: pointer to the hardware structure
- *
- * This function checks the MMNGC.MNG_VETO bit to see if there are
- * any constraints on link from manageability.  For MACs that don't
- * have this bit, just return false since the link cannot be blocked
- * via this method.
- **/
-s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
-{
-	u32 mmngc;
-
-	DEBUGFUNC("ixgbe_check_reset_blocked");
-
-	/* If we don't have this bit, it can't be blocking */
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return false;
-
-	mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
-	if (mmngc & IXGBE_MMNGC_MNG_VETO) {
-		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
-			      "MNG_VETO bit detected.\n");
-		return true;
-	}
-
-	return false;
-}
-
-/**
- *  ixgbe_validate_phy_addr - Determines whether a phy address is valid
- *  @hw: pointer to hardware structure
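- *  @phy_addr: PHY address to check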
- *
- **/
-bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
-{
-	u16 phy_id = 0;
-	bool valid = false;
-
-	DEBUGFUNC("ixgbe_validate_phy_addr");
-
-	hw->phy.addr = phy_addr;
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
-			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
-
-	if (phy_id != 0xFFFF && phy_id != 0x0)
-		valid = true;
-
-	return valid;
-}
-
-/**
- *  ixgbe_get_phy_id - Get the phy ID
- *  @hw: pointer to hardware structure
- *
- **/
-s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
-{
-	u32 status;
-	u16 phy_id_high = 0;
-	u16 phy_id_low = 0;
-
-	DEBUGFUNC("ixgbe_get_phy_id");
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				      &phy_id_high);
-
-	if (status == IXGBE_SUCCESS) {
-		hw->phy.id = (u32)(phy_id_high << 16);
-		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
-					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					      &phy_id_low);
-		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
-		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
-	}
-	return status;
-}
-
-/**
- *  ixgbe_get_phy_type_from_id - Get the phy type
- *  @phy_id: PHY ID to look up
- *
- **/
-enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
-{
-	enum ixgbe_phy_type phy_type;
-
-	DEBUGFUNC("ixgbe_get_phy_type_from_id");
-
-	switch (phy_id) {
-	case TN1010_PHY_ID:
-		phy_type = ixgbe_phy_tn;
-		break;
-	case X550_PHY_ID:
-	case X540_PHY_ID:
-		phy_type = ixgbe_phy_aq;
-		break;
-	case QT2022_PHY_ID:
-		phy_type = ixgbe_phy_qt;
-		break;
-	case ATH_PHY_ID:
-		phy_type = ixgbe_phy_nl;
-		break;
-	case X557_PHY_ID:
-		phy_type = ixgbe_phy_x550em_ext_t;
-		break;
-	default:
-		phy_type = ixgbe_phy_unknown;
-		break;
-	}
-
-	DEBUGOUT1("phy type found is %d\n", phy_type);
-	return phy_type;
-}
-
-/**
- *  ixgbe_reset_phy_generic - Performs a PHY reset
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
-{
-	u32 i;
-	u16 ctrl = 0;
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_reset_phy_generic");
-
-	if (hw->phy.type == ixgbe_phy_unknown)
-		status = ixgbe_identify_phy_generic(hw);
-
-	if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
-		goto out;
-
-	/* Don't reset PHY if it's shut down due to overtemp. */
-	if (!hw->phy.reset_if_overtemp &&
-	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
-		goto out;
-
-	/* Blocked by MNG FW so bail */
-	if (ixgbe_check_reset_blocked(hw))
-		goto out;
-
-	/*
-	 * Perform soft PHY reset to the PHY_XS.
-	 * This will cause a soft reset to the PHY
-	 */
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
-			      IXGBE_MDIO_PHY_XS_RESET);
-
-	/*
-	 * Poll for reset bit to self-clear indicating reset is complete.
-	 * Some PHYs could take up to 3 seconds to complete and need about
-	 * 1.7 usec delay after the reset is complete.
-	 */
-	for (i = 0; i < 30; i++) {
-		msec_delay(100);
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
-		if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
-			usec_delay(2);
-			break;
-		}
-	}
-
-	if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
-		status = IXGBE_ERR_RESET_FAILED;
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "PHY reset polling failed to complete.\n");
-	}
-
-out:
-	return status;
-}
-
-/**
- *  ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
- *  the SWFW lock
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit address of PHY register to read
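- *  @device_type: 5 bit device type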
- *  @phy_data: Pointer to read data from PHY register
- **/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-		       u16 *phy_data)
-{
-	u32 i, data, command;
-
-	/* Setup and write the address cycle command */
-	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
-	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-	/*
-	 * Check every 10 usec to see if the address cycle completed.
-	 * The MDI Command bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-				break;
-	}
-
-
-	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	/*
-	 * Address cycle complete, setup and write the read
-	 * command
-	 */
-	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
-	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-	/*
-	 * Check every 10 usec to see if the address cycle
-	 * completed. The MDI Command bit will clear when the
-	 * operation is complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-			break;
-	}
-
-	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	/*
-	 * Read operation is complete.  Get the data
-	 * from MSRWD
-	 */
-	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
-	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
-	*phy_data = (u16)(data);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
- *  using the SWFW lock - this function is needed in most cases
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit address of PHY register to read
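- *  @device_type: 5 bit device type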
- *  @phy_data: Pointer to read data from PHY register
- **/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 *phy_data)
-{
-	s32 status;
-	u32 gssr = hw->phy.phy_semaphore_mask;
-
-	DEBUGFUNC("ixgbe_read_phy_reg_generic");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
-		status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
-						phy_data);
-		hw->mac.ops.release_swfw_sync(hw, gssr);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
- *  without SWFW lock
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @device_type: 5 bit device type
- *  @phy_data: Data to write to the PHY register
- **/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
-				u32 device_type, u16 phy_data)
-{
-	u32 i, command;
-
-	/* Put the data in the MDI single read and write data register*/
-	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
-	/* Setup and write the address cycle command */
-	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
-	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-	/*
-	 * Check every 10 usec to see if the address cycle completed.
-	 * The MDI Command bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-			break;
-	}
-
-	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	/*
-	 * Address cycle complete, setup and write the write
-	 * command
-	 */
-	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
-	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-	/*
-	 * Check every 10 usec to see if the address cycle
-	 * completed. The MDI Command bit will clear when the
-	 * operation is complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-			break;
-	}
-
-	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
- *  using SWFW lock- this function is needed in most cases
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @device_type: 5 bit device type
- *  @phy_data: Data to write to the PHY register
- **/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-				u32 device_type, u16 phy_data)
-{
-	s32 status;
-	u32 gssr = hw->phy.phy_semaphore_mask;
-
-	DEBUGFUNC("ixgbe_write_phy_reg_generic");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
-		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
-						 phy_data);
-		hw->mac.ops.release_swfw_sync(hw, gssr);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_phy_link_generic - Set and restart auto-neg
- *  @hw: pointer to hardware structure
- *
- *  Restarts auto-negotiation on the PHY and waits for completion.
- **/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
-	bool autoneg = false;
-	ixgbe_link_speed speed;
-
-	DEBUGFUNC("ixgbe_setup_phy_link_generic");
-
-	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-		/* Set or unset auto-negotiation 10G advertisement */
-		hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-			autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
-
-		hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	if (hw->mac.type == ixgbe_mac_X550) {
-		if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
-			/* Set or unset auto-negotiation 5G advertisement */
-			hw->phy.ops.read_reg(hw,
-				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				&autoneg_reg);
-
-			autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
-			if (hw->phy.autoneg_advertised &
-			     IXGBE_LINK_SPEED_5GB_FULL)
-				autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
-
-			hw->phy.ops.write_reg(hw,
-				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				autoneg_reg);
-		}
-
-		if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
-			/* Set or unset auto-negotiation 2.5G advertisement */
-			hw->phy.ops.read_reg(hw,
-				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				&autoneg_reg);
-
-			autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
-			if (hw->phy.autoneg_advertised &
-			    IXGBE_LINK_SPEED_2_5GB_FULL)
-				autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
-
-			hw->phy.ops.write_reg(hw,
-				IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				autoneg_reg);
-		}
-	}
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-		/* Set or unset auto-negotiation 1G advertisement */
-		hw->phy.ops.read_reg(hw,
-				     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
-
-		hw->phy.ops.write_reg(hw,
-				      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	if (speed & IXGBE_LINK_SPEED_100_FULL) {
-		/* Set or unset auto-negotiation 100M advertisement */
-		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
-				 IXGBE_MII_100BASE_T_ADVERTISE_HALF);
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-			autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
-		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	/* Blocked by MNG FW so don't reset PHY */
-	if (ixgbe_check_reset_blocked(hw))
-		return status;
-
-	/* Restart PHY auto-negotiation. */
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
-
-	autoneg_reg |= IXGBE_MII_RESTART;
-
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
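- *  @autoneg_wait_to_complete: unused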
- **/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-				       ixgbe_link_speed speed,
-				       bool autoneg_wait_to_complete)
-{
-	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
-
-	DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
-
-	/*
-	 * Clear autoneg_advertised and set new values based on input link
-	 * speed.
-	 */
-	hw->phy.autoneg_advertised = 0;
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_5GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_100_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
-
-	/* Setup link based on the new speed settings */
-	hw->phy.ops.setup_link(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the supported link capabilities by reading the PHY auto
- *  negotiation register.
- **/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-					       ixgbe_link_speed *speed,
-					       bool *autoneg)
-{
-	s32 status;
-	u16 speed_ability;
-
-	DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				      &speed_ability);
-
-	if (status == IXGBE_SUCCESS) {
-		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
-			*speed |= IXGBE_LINK_SPEED_100_FULL;
-	}
-
-	/* Internal PHY does not support 100 Mbps */
-	if (hw->mac.type == ixgbe_mac_X550EM_x)
-		*speed &= ~IXGBE_LINK_SPEED_100_FULL;
-
-	if (hw->mac.type == ixgbe_mac_X550) {
-		*speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
-		*speed |= IXGBE_LINK_SPEED_5GB_FULL;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_check_phy_link_tnx - Determine link and speed status
- *  @hw: pointer to hardware structure
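- *  @speed: pointer to link speed
- *  @link_up: pointer to a bool set to true when link is up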
- *
- *  Reads the VS1 register to determine if link is up and the current speed for
- *  the PHY.
- **/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-			     bool *link_up)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 time_out;
-	u32 max_time_out = 10;
-	u16 phy_link = 0;
-	u16 phy_speed = 0;
-	u16 phy_data = 0;
-
-	DEBUGFUNC("ixgbe_check_phy_link_tnx");
-
-	/* Initialize speed and link to default case */
-	*link_up = false;
-	*speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-	/*
-	 * Check current speed and link status of the PHY register.
-	 * This is a vendor specific register and may have to
-	 * be changed for other copper PHYs.
-	 */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		usec_delay(10);
-		status = hw->phy.ops.read_reg(hw,
-					IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-					IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-					&phy_data);
-		phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
-		phy_speed = phy_data &
-				 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
-		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
-			*link_up = true;
-			if (phy_speed ==
-			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
-				*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			break;
-		}
-	}
-
-	return status;
-}
-
-/**
- *	ixgbe_setup_phy_link_tnx - Set and restart auto-neg
- *	@hw: pointer to hardware structure
- *
- *	Restarts auto-negotiation on the PHY and waits for completion.
- **/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
-	bool autoneg = false;
-	ixgbe_link_speed speed;
-
-	DEBUGFUNC("ixgbe_setup_phy_link_tnx");
-
-	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-		/* Set or unset auto-negotiation 10G advertisement */
-		hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-			autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
-
-		hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-		/* Set or unset auto-negotiation 1G advertisement */
-		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
-
-		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	if (speed & IXGBE_LINK_SPEED_100_FULL) {
-		/* Set or unset auto-negotiation 100M advertisement */
-		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
-				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				     &autoneg_reg);
-
-		autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-			autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
-		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      autoneg_reg);
-	}
-
-	/* Blocked by MNG FW so don't reset PHY */
-	if (ixgbe_check_reset_blocked(hw))
-		return status;
-
-	/* Restart PHY auto-negotiation. */
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
-
-	autoneg_reg |= IXGBE_MII_RESTART;
-
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
-
-	return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-				       u16 *firmware_version)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
-
-	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      firmware_version);
-
-	return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-					   u16 *firmware_version)
-{
-	s32 status;
-
-	DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
-
-	status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      firmware_version);
-
-	return status;
-}
-
-/**
- *  ixgbe_reset_phy_nl - Performs a PHY reset
- *  @hw: pointer to hardware structure
- **/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
-{
-	u16 phy_offset, control, eword, edata, block_crc;
-	bool end_data = false;
-	u16 list_offset, data_offset;
-	u16 phy_data = 0;
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_reset_phy_nl");
-
-	/* Blocked by MNG FW so bail */
-	if (ixgbe_check_reset_blocked(hw))
-		goto out;
-
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-			     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
-
-	/* reset the PHY and poll for completion */
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
-			      (phy_data | IXGBE_MDIO_PHY_XS_RESET));
-
-	for (i = 0; i < 100; i++) {
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
-		if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
-			break;
-		msec_delay(10);
-	}
-
-	if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
-		DEBUGOUT("PHY reset did not complete.\n");
-		ret_val = IXGBE_ERR_PHY;
-		goto out;
-	}
-
-	/* Get init offsets */
-	ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
-						      &data_offset);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
-	data_offset++;
-	while (!end_data) {
-		/*
-		 * Read control word from PHY init contents offset
-		 */
-		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
-		if (ret_val)
-			goto err_eeprom;
-		control = (eword & IXGBE_CONTROL_MASK_NL) >>
-			   IXGBE_CONTROL_SHIFT_NL;
-		edata = eword & IXGBE_DATA_MASK_NL;
-		switch (control) {
-		case IXGBE_DELAY_NL:
-			data_offset++;
-			DEBUGOUT1("DELAY: %d MS\n", edata);
-			msec_delay(edata);
-			break;
-		case IXGBE_DATA_NL:
-			DEBUGOUT("DATA:\n");
-			data_offset++;
-			ret_val = hw->eeprom.ops.read(hw, data_offset,
-						      &phy_offset);
-			if (ret_val)
-				goto err_eeprom;
-			data_offset++;
-			for (i = 0; i < edata; i++) {
-				ret_val = hw->eeprom.ops.read(hw, data_offset,
-							      &eword);
-				if (ret_val)
-					goto err_eeprom;
-				hw->phy.ops.write_reg(hw, phy_offset,
-						      IXGBE_TWINAX_DEV, eword);
-				DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
-					  phy_offset);
-				data_offset++;
-				phy_offset++;
-			}
-			break;
-		case IXGBE_CONTROL_NL:
-			data_offset++;
-			DEBUGOUT("CONTROL:\n");
-			if (edata == IXGBE_CONTROL_EOL_NL) {
-				DEBUGOUT("EOL\n");
-				end_data = true;
-			} else if (edata == IXGBE_CONTROL_SOL_NL) {
-				DEBUGOUT("SOL\n");
-			} else {
-				DEBUGOUT("Bad control value\n");
-				ret_val = IXGBE_ERR_PHY;
-				goto out;
-			}
-			break;
-		default:
-			DEBUGOUT("Bad control type\n");
-			ret_val = IXGBE_ERR_PHY;
-			goto out;
-		}
-	}
-
-out:
-	return ret_val;
-
-err_eeprom:
-	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-		      "eeprom read at offset %d failed", data_offset);
-	return IXGBE_ERR_PHY;
-}
-
-/**
- *  ixgbe_identify_module_generic - Identifies module type
- *  @hw: pointer to hardware structure
- *
- *  Determines HW type and calls appropriate function.
- **/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
-
-	DEBUGFUNC("ixgbe_identify_module_generic");
-
-	switch (hw->mac.ops.get_media_type(hw)) {
-	case ixgbe_media_type_fiber:
-		status = ixgbe_identify_sfp_module_generic(hw);
-		break;
-
-	case ixgbe_media_type_fiber_qsfp:
-		status = ixgbe_identify_qsfp_module_generic(hw);
-		break;
-
-	default:
-		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		break;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
- *  @hw: pointer to hardware structure
- *
- *  Searches for and identifies the SFP module and assigns appropriate PHY type.
- **/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-	u32 vendor_oui = 0;
-	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
-	u8 identifier = 0;
-	u8 comp_codes_1g = 0;
-	u8 comp_codes_10g = 0;
-	u8 oui_bytes[3] = {0, 0, 0};
-	u8 cable_tech = 0;
-	u8 cable_spec = 0;
-	u16 enforce_sfp = 0;
-
-	DEBUGFUNC("ixgbe_identify_sfp_module_generic");
-
-	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
-		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		goto out;
-	}
-
-	/* LAN ID is needed for I2C access */
-	hw->mac.ops.set_lan_id(hw);
-
-	status = hw->phy.ops.read_i2c_eeprom(hw,
-					     IXGBE_SFF_IDENTIFIER,
-					     &identifier);
-
-	if (status != IXGBE_SUCCESS)
-		goto err_read_i2c_eeprom;
-
-	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
-		hw->phy.type = ixgbe_phy_sfp_unsupported;
-		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-	} else {
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_1GBE_COMP_CODES,
-						     &comp_codes_1g);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_10GBE_COMP_CODES,
-						     &comp_codes_10g);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-						     IXGBE_SFF_CABLE_TECHNOLOGY,
-						     &cable_tech);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-
-		 /* ID Module
-		  * =========
-		  * 0   SFP_DA_CU
-		  * 1   SFP_SR
-		  * 2   SFP_LR
-		  * 3   SFP_DA_CORE0 - 82599-specific
-		  * 4   SFP_DA_CORE1 - 82599-specific
-		  * 5   SFP_SR/LR_CORE0 - 82599-specific
-		  * 6   SFP_SR/LR_CORE1 - 82599-specific
-		  * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
-		  * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
-		  * 9   SFP_1g_cu_CORE0 - 82599-specific
-		  * 10  SFP_1g_cu_CORE1 - 82599-specific
-		  * 11  SFP_1g_sx_CORE0 - 82599-specific
-		  * 12  SFP_1g_sx_CORE1 - 82599-specific
-		  */
-		if (hw->mac.type == ixgbe_mac_82598EB) {
-			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
-			else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_sr;
-			else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-				hw->phy.sfp_type = ixgbe_sfp_type_lr;
-			else
-				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-		} else {
-			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						     ixgbe_sfp_type_da_cu_core0;
-				else
-					hw->phy.sfp_type =
-						     ixgbe_sfp_type_da_cu_core1;
-			} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
-				hw->phy.ops.read_i2c_eeprom(
-						hw, IXGBE_SFF_CABLE_SPEC_COMP,
-						&cable_spec);
-				if (cable_spec &
-				    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
-					if (hw->bus.lan_id == 0)
-						hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core0;
-					else
-						hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core1;
-				} else {
-					hw->phy.sfp_type =
-							ixgbe_sfp_type_unknown;
-				}
-			} else if (comp_codes_10g &
-				   (IXGBE_SFF_10GBASESR_CAPABLE |
-				    IXGBE_SFF_10GBASELR_CAPABLE)) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						      ixgbe_sfp_type_srlr_core0;
-				else
-					hw->phy.sfp_type =
-						      ixgbe_sfp_type_srlr_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_cu_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_cu_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_sx_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_sx_core1;
-			} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
-				if (hw->bus.lan_id == 0)
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_lx_core0;
-				else
-					hw->phy.sfp_type =
-						ixgbe_sfp_type_1g_lx_core1;
-			} else {
-				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-			}
-		}
-
-		if (hw->phy.sfp_type != stored_sfp_type)
-			hw->phy.sfp_setup_needed = true;
-
-		/* Determine if the SFP+ PHY is dual speed or not. */
-		hw->phy.multispeed_fiber = false;
-		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
-		   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
-		   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
-		   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
-			hw->phy.multispeed_fiber = true;
-
-		/* Determine PHY vendor */
-		if (hw->phy.type != ixgbe_phy_nl) {
-			hw->phy.id = identifier;
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE0,
-						    &oui_bytes[0]);
-
-			if (status != IXGBE_SUCCESS)
-				goto err_read_i2c_eeprom;
-
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE1,
-						    &oui_bytes[1]);
-
-			if (status != IXGBE_SUCCESS)
-				goto err_read_i2c_eeprom;
-
-			status = hw->phy.ops.read_i2c_eeprom(hw,
-						    IXGBE_SFF_VENDOR_OUI_BYTE2,
-						    &oui_bytes[2]);
-
-			if (status != IXGBE_SUCCESS)
-				goto err_read_i2c_eeprom;
-
-			vendor_oui =
-			  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
-			   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
-			   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
-
-			switch (vendor_oui) {
-			case IXGBE_SFF_VENDOR_OUI_TYCO:
-				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-					hw->phy.type =
-						    ixgbe_phy_sfp_passive_tyco;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_FTL:
-				if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-					hw->phy.type = ixgbe_phy_sfp_ftl_active;
-				else
-					hw->phy.type = ixgbe_phy_sfp_ftl;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_AVAGO:
-				hw->phy.type = ixgbe_phy_sfp_avago;
-				break;
-			case IXGBE_SFF_VENDOR_OUI_INTEL:
-				hw->phy.type = ixgbe_phy_sfp_intel;
-				break;
-			default:
-				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-					hw->phy.type =
-						 ixgbe_phy_sfp_passive_unknown;
-				else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-					hw->phy.type =
-						ixgbe_phy_sfp_active_unknown;
-				else
-					hw->phy.type = ixgbe_phy_sfp_unknown;
-				break;
-			}
-		}
-
-		/* Allow any DA cable vendor */
-		if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
-		    IXGBE_SFF_DA_ACTIVE_CABLE)) {
-			status = IXGBE_SUCCESS;
-			goto out;
-		}
-
-		/* Verify supported 1G SFP modules */
-		if (comp_codes_10g == 0 &&
-		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-			hw->phy.type = ixgbe_phy_sfp_unsupported;
-			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
-
-		/* Anything else 82598-based is supported */
-		if (hw->mac.type == ixgbe_mac_82598EB) {
-			status = IXGBE_SUCCESS;
-			goto out;
-		}
-
-		ixgbe_get_device_caps(hw, &enforce_sfp);
-		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
-		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-			/* Make sure we're a supported PHY type */
-			if (hw->phy.type == ixgbe_phy_sfp_intel) {
-				status = IXGBE_SUCCESS;
-			} else {
-				if (hw->allow_unsupported_sfp == true) {
-					EWARN(hw, "WARNING: Intel (R) Network "
-					      "Connections are quality tested "
-					      "using Intel (R) Ethernet Optics."
-					      " Using untested modules is not "
-					      "supported and may cause unstable"
-					      " operation or damage to the "
-					      "module or the adapter. Intel "
-					      "Corporation is not responsible "
-					      "for any harm caused by using "
-					      "untested modules.\n", status);
-					status = IXGBE_SUCCESS;
-				} else {
-					DEBUGOUT("SFP+ module not supported\n");
-					hw->phy.type =
-						ixgbe_phy_sfp_unsupported;
-					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-				}
-			}
-		} else {
-			status = IXGBE_SUCCESS;
-		}
-	}
-
-out:
-	return status;
-
-err_read_i2c_eeprom:
-	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-	if (hw->phy.type != ixgbe_phy_nl) {
-		hw->phy.id = 0;
-		hw->phy.type = ixgbe_phy_unknown;
-	}
-	return IXGBE_ERR_SFP_NOT_PRESENT;
-}
-
-/**
- *  ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current SFP.
- */
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u8 comp_codes_10g = 0;
-	u8 comp_codes_1g = 0;
-
-	DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic");
-
-	hw->phy.ops.identify_sfp(hw);
-	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-		return physical_layer;
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_sfp_passive_tyco:
-	case ixgbe_phy_sfp_passive_unknown:
-	case ixgbe_phy_qsfp_passive_unknown:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-		break;
-	case ixgbe_phy_sfp_ftl_active:
-	case ixgbe_phy_sfp_active_unknown:
-	case ixgbe_phy_qsfp_active_unknown:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
-		break;
-	case ixgbe_phy_sfp_avago:
-	case ixgbe_phy_sfp_ftl:
-	case ixgbe_phy_sfp_intel:
-	case ixgbe_phy_sfp_unknown:
-		hw->phy.ops.read_i2c_eeprom(hw,
-		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
-		hw->phy.ops.read_i2c_eeprom(hw,
-		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
-		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
-		break;
-	case ixgbe_phy_qsfp_intel:
-	case ixgbe_phy_qsfp_unknown:
-		hw->phy.ops.read_i2c_eeprom(hw,
-		      IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
-		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-		break;
-	default:
-		break;
-	}
-
-	return physical_layer;
-}
-
-/**
- *  ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
- *  @hw: pointer to hardware structure
- *
- *  Searches for and identifies the QSFP module and assigns appropriate PHY type
- **/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
-	u32 vendor_oui = 0;
-	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
-	u8 identifier = 0;
-	u8 comp_codes_1g = 0;
-	u8 comp_codes_10g = 0;
-	u8 oui_bytes[3] = {0, 0, 0};
-	u16 enforce_sfp = 0;
-	u8 connector = 0;
-	u8 cable_length = 0;
-	u8 device_tech = 0;
-	bool active_cable = false;
-
-	DEBUGFUNC("ixgbe_identify_qsfp_module_generic");
-
-	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
-		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-		status = IXGBE_ERR_SFP_NOT_PRESENT;
-		goto out;
-	}
-
-	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
-					     &identifier);
-
-	if (status != IXGBE_SUCCESS)
-		goto err_read_i2c_eeprom;
-
-	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
-		hw->phy.type = ixgbe_phy_sfp_unsupported;
-		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-		goto out;
-	}
-
-	hw->phy.id = identifier;
-
-	/* LAN ID is needed for sfp_type determination */
-	hw->mac.ops.set_lan_id(hw);
-
-	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
-					     &comp_codes_10g);
-
-	if (status != IXGBE_SUCCESS)
-		goto err_read_i2c_eeprom;
-
-	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
-					     &comp_codes_1g);
-
-	if (status != IXGBE_SUCCESS)
-		goto err_read_i2c_eeprom;
-
-	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
-		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
-		if (hw->bus.lan_id == 0)
-			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
-		else
-			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
-	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
-				     IXGBE_SFF_10GBASELR_CAPABLE)) {
-		if (hw->bus.lan_id == 0)
-			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
-		else
-			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
-	} else {
-		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
-			active_cable = true;
-
-		if (!active_cable) {
-			/* check for active DA cables that pre-date
-			 * SFF-8436 v3.6 */
-			hw->phy.ops.read_i2c_eeprom(hw,
-					IXGBE_SFF_QSFP_CONNECTOR,
-					&connector);
-
-			hw->phy.ops.read_i2c_eeprom(hw,
-					IXGBE_SFF_QSFP_CABLE_LENGTH,
-					&cable_length);
-
-			hw->phy.ops.read_i2c_eeprom(hw,
-					IXGBE_SFF_QSFP_DEVICE_TECH,
-					&device_tech);
-
-			if ((connector ==
-				     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
-			    (cable_length > 0) &&
-			    ((device_tech >> 4) ==
-				     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
-				active_cable = true;
-		}
-
-		if (active_cable) {
-			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
-			if (hw->bus.lan_id == 0)
-				hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core0;
-			else
-				hw->phy.sfp_type =
-						ixgbe_sfp_type_da_act_lmt_core1;
-		} else {
-			/* unsupported module type */
-			hw->phy.type = ixgbe_phy_sfp_unsupported;
-			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-			goto out;
-		}
-	}
-
-	if (hw->phy.sfp_type != stored_sfp_type)
-		hw->phy.sfp_setup_needed = true;
-
-	/* Determine if the QSFP+ PHY is dual speed or not. */
-	hw->phy.multispeed_fiber = false;
-	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
-	   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
-	   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
-	   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
-		hw->phy.multispeed_fiber = true;
-
-	/* Determine PHY vendor for optical modules */
-	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
-			      IXGBE_SFF_10GBASELR_CAPABLE))  {
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
-					    &oui_bytes[0]);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
-					    &oui_bytes[1]);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-
-		status = hw->phy.ops.read_i2c_eeprom(hw,
-					    IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
-					    &oui_bytes[2]);
-
-		if (status != IXGBE_SUCCESS)
-			goto err_read_i2c_eeprom;
-
-		vendor_oui =
-		  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
-		   (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
-		   (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
-
-		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
-			hw->phy.type = ixgbe_phy_qsfp_intel;
-		else
-			hw->phy.type = ixgbe_phy_qsfp_unknown;
-
-		ixgbe_get_device_caps(hw, &enforce_sfp);
-		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
-			/* Make sure we're a supported PHY type */
-			if (hw->phy.type == ixgbe_phy_qsfp_intel) {
-				status = IXGBE_SUCCESS;
-			} else {
-				if (hw->allow_unsupported_sfp == true) {
-					EWARN(hw, "WARNING: Intel (R) Network "
-					      "Connections are quality tested "
-					      "using Intel (R) Ethernet Optics."
-					      " Using untested modules is not "
-					      "supported and may cause unstable"
-					      " operation or damage to the "
-					      "module or the adapter. Intel "
-					      "Corporation is not responsible "
-					      "for any harm caused by using "
-					      "untested modules.\n", status);
-					status = IXGBE_SUCCESS;
-				} else {
-					DEBUGOUT("QSFP module not supported\n");
-					hw->phy.type =
-						ixgbe_phy_sfp_unsupported;
-					status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-				}
-			}
-		} else {
-			status = IXGBE_SUCCESS;
-		}
-	}
-
-out:
-	return status;
-
-err_read_i2c_eeprom:
-	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-	hw->phy.id = 0;
-	hw->phy.type = ixgbe_phy_unknown;
-
-	return IXGBE_ERR_SFP_NOT_PRESENT;
-}
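
For reference, a minimal standalone sketch of how the three vendor OUI bytes read from module EEPROM are combined and matched, reusing the shift values and the Intel OUI constant defined in ixgbe_phy.h; the sample byte values and the main() wrapper are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define SFF_VENDOR_OUI_BYTE0_SHIFT	24
#define SFF_VENDOR_OUI_BYTE1_SHIFT	16
#define SFF_VENDOR_OUI_BYTE2_SHIFT	8
#define SFF_VENDOR_OUI_INTEL		0x001B2100u

int main(void)
{
	uint8_t oui_bytes[3] = { 0x00, 0x1B, 0x21 };	/* example EEPROM bytes */
	uint32_t vendor_oui =
		((uint32_t)oui_bytes[0] << SFF_VENDOR_OUI_BYTE0_SHIFT) |
		((uint32_t)oui_bytes[1] << SFF_VENDOR_OUI_BYTE1_SHIFT) |
		((uint32_t)oui_bytes[2] << SFF_VENDOR_OUI_BYTE2_SHIFT);

	printf("OUI = 0x%08X -> %s\n", (unsigned int)vendor_oui,
	       vendor_oui == SFF_VENDOR_OUI_INTEL ? "Intel" : "unknown");
	return 0;
}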
-
-
-/**
- *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
- *  @hw: pointer to hardware structure
- *  @list_offset: offset to the SFP ID list
- *  @data_offset: offset to the SFP data block
- *
- *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
- *  so it returns the offsets to the phy init sequence block.
- **/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-					u16 *list_offset,
-					u16 *data_offset)
-{
-	u16 sfp_id;
-	u16 sfp_type = hw->phy.sfp_type;
-
-	DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
-
-	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
-	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-		return IXGBE_ERR_SFP_NOT_PRESENT;
-
-	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
-	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
-	/*
-	 * Limiting active cables and 1G Phys must be initialized as
-	 * SR modules
-	 */
-	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
-	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
-		sfp_type = ixgbe_sfp_type_srlr_core0;
-	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
-		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
-		sfp_type = ixgbe_sfp_type_srlr_core1;
-
-	/* Read offset to PHY init contents */
-	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			      "eeprom read at offset %d failed",
-			      IXGBE_PHY_INIT_OFFSET_NL);
-		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
-	}
-
-	if ((!*list_offset) || (*list_offset == 0xFFFF))
-		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
-
-	/* Shift offset to first ID word */
-	(*list_offset)++;
-
-	/*
-	 * Find the matching SFP ID in the EEPROM
-	 * and program the init sequence
-	 */
-	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
-		goto err_phy;
-
-	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
-		if (sfp_id == sfp_type) {
-			(*list_offset)++;
-			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
-				goto err_phy;
-			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
-				DEBUGOUT("SFP+ module not supported\n");
-				return IXGBE_ERR_SFP_NOT_SUPPORTED;
-			} else {
-				break;
-			}
-		} else {
-			(*list_offset) += 2;
-			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
-				goto err_phy;
-		}
-	}
-
-	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
-		DEBUGOUT("No matching SFP+ module found\n");
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-	}
-
-	return IXGBE_SUCCESS;
-
-err_phy:
-	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-		      "eeprom read at offset %d failed", *list_offset);
-	return IXGBE_ERR_PHY;
-}
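
The NVM block walked above is a list of { sfp_id, data_offset } word pairs terminated by an end marker. A standalone sketch of the same walk over an in-memory array, assuming a hypothetical end-marker value and example contents (the real IXGBE_PHY_INIT_END_NL value is defined elsewhere):

#include <stdint.h>
#include <stdio.h>

#define LIST_END	0xFFFF	/* stand-in for IXGBE_PHY_INIT_END_NL */

static int find_init_offset(const uint16_t *eeprom, uint16_t list_offset,
			    uint16_t sfp_type, uint16_t *data_offset)
{
	uint16_t sfp_id = eeprom[list_offset];

	while (sfp_id != LIST_END) {
		if (sfp_id == sfp_type) {
			*data_offset = eeprom[++list_offset];
			return (*data_offset && *data_offset != 0xFFFF) ? 0 : -1;
		}
		list_offset += 2;	/* skip this id and its offset */
		sfp_id = eeprom[list_offset];
	}
	return -1;	/* no matching module in the list */
}

int main(void)
{
	const uint16_t eeprom[] = { 0x0003, 0x0040, 0x0007, 0x0060, LIST_END };
	uint16_t off;

	if (find_init_offset(eeprom, 0, 0x0007, &off) == 0)
		printf("init sequence at word offset 0x%04X\n", (unsigned int)off);
	return 0;
}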
-
-/**
- *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
- *  @eeprom_data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				  u8 *eeprom_data)
-{
-	DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
-
-	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
-					 IXGBE_I2C_EEPROM_DEV_ADDR,
-					 eeprom_data);
-}
-
-/**
- *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset at address 0xA2
- *  @eeprom_data: value read
- *
- *  Performs byte read operation to SFP module's SFF-8472 data over I2C
- **/
-STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
-					  u8 *sff8472_data)
-{
-	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
-					 IXGBE_I2C_EEPROM_DEV_ADDR2,
-					 sff8472_data);
-}
-
-/**
- *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
- *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to write
- *  @eeprom_data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface.
- **/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				   u8 eeprom_data)
-{
-	DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
-
-	return hw->phy.ops.write_i2c_byte(hw, byte_offset,
-					  IXGBE_I2C_EEPROM_DEV_ADDR,
-					  eeprom_data);
-}
-
-/**
- * ixgbe_is_sfp_probe - Returns true if SFP is being detected
- * @hw: pointer to hardware structure
- * @offset: eeprom offset to be read
- * @addr: I2C address to be read
- */
-STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
-{
-	if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
-	    offset == IXGBE_SFF_IDENTIFIER &&
-	    hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-		return true;
-	return false;
-}
-
-/**
- *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to read
- *  @data: value read
- *
- *  Performs byte read operation to SFP module's EEPROM over I2C interface at
- *  a specified device address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 dev_addr, u8 *data)
-{
-	s32 status;
-	u32 max_retry = 10;
-	u32 retry = 0;
-	u32 swfw_mask = hw->phy.phy_semaphore_mask;
-	bool nack = 1;
-	*data = 0;
-
-	DEBUGFUNC("ixgbe_read_i2c_byte_generic");
-
-	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
-		max_retry = IXGBE_SFP_DETECT_RETRIES;
-
-	do {
-		if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
-			return IXGBE_ERR_SWFW_SYNC;
-
-		ixgbe_i2c_start(hw);
-
-		/* Device Address and write indication */
-		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		ixgbe_i2c_start(hw);
-
-		/* Device Address and read indication */
-		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_clock_in_i2c_byte(hw, data);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_clock_out_i2c_bit(hw, nack);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		ixgbe_i2c_stop(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		return IXGBE_SUCCESS;
-
-fail:
-		ixgbe_i2c_bus_clear(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		msec_delay(100);
-		retry++;
-		if (retry < max_retry)
-			DEBUGOUT("I2C byte read error - Retrying.\n");
-		else
-			DEBUGOUT("I2C byte read error.\n");
-
-	} while (retry < max_retry);
-
-	return status;
-}
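
A quick illustration of the address-byte convention in the read above: IXGBE_I2C_EEPROM_DEV_ADDR (0xA0) already carries the R/W flag in bit 0, so the write phase clocks out 0xA0 and the read phase clocks out 0xA0 | 0x1 = 0xA1. Standalone example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dev_addr = 0xA0;	/* IXGBE_I2C_EEPROM_DEV_ADDR */

	printf("write phase: 0x%02X, read phase: 0x%02X\n",
	       dev_addr, (uint8_t)(dev_addr | 0x1));
	return 0;
}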
-
-/**
- *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
- *  @hw: pointer to hardware structure
- *  @byte_offset: byte offset to write
- *  @data: value to write
- *
- *  Performs byte write operation to SFP module's EEPROM over I2C interface at
- *  a specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				 u8 dev_addr, u8 data)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 max_retry = 1;
-	u32 retry = 0;
-	u32 swfw_mask = hw->phy.phy_semaphore_mask;
-
-	DEBUGFUNC("ixgbe_write_i2c_byte_generic");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
-		status = IXGBE_ERR_SWFW_SYNC;
-		goto write_byte_out;
-	}
-
-	do {
-		ixgbe_i2c_start(hw);
-
-		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_clock_out_i2c_byte(hw, data);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		status = ixgbe_get_i2c_ack(hw);
-		if (status != IXGBE_SUCCESS)
-			goto fail;
-
-		ixgbe_i2c_stop(hw);
-		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-		return IXGBE_SUCCESS;
-
-fail:
-		ixgbe_i2c_bus_clear(hw);
-		retry++;
-		if (retry < max_retry)
-			DEBUGOUT("I2C byte write error - Retrying.\n");
-		else
-			DEBUGOUT("I2C byte write error.\n");
-	} while (retry < max_retry);
-
-	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
-write_byte_out:
-	return status;
-}
-
-/**
- *  ixgbe_i2c_start - Sets I2C start condition
- *  @hw: pointer to hardware structure
- *
- *  Sets I2C start condition (High -> Low on SDA while SCL is High)
- *  Set bit-bang mode on X550 hardware.
- **/
-STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw)
-{
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-
-	DEBUGFUNC("ixgbe_i2c_start");
-
-	i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
-
-	/* Start condition must begin with data and clock high */
-	ixgbe_set_i2c_data(hw, &i2cctl, 1);
-	ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-	/* Setup time for start condition (4.7us) */
-	usec_delay(IXGBE_I2C_T_SU_STA);
-
-	ixgbe_set_i2c_data(hw, &i2cctl, 0);
-
-	/* Hold time for start condition (4us) */
-	usec_delay(IXGBE_I2C_T_HD_STA);
-
-	ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-	/* Minimum low period of clock is 4.7 us */
-	usec_delay(IXGBE_I2C_T_LOW);
-
-}
-
-/**
- *  ixgbe_i2c_stop - Sets I2C stop condition
- *  @hw: pointer to hardware structure
- *
- *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
- *  Disables bit-bang mode and negates data output enable on X550
- *  hardware.
- **/
-STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw)
-{
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
-	u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
-
-	DEBUGFUNC("ixgbe_i2c_stop");
-
-	/* Stop condition must begin with data low and clock high */
-	ixgbe_set_i2c_data(hw, &i2cctl, 0);
-	ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-	/* Setup time for stop condition (4us) */
-	usec_delay(IXGBE_I2C_T_SU_STO);
-
-	ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
-	/* bus free time between stop and start (4.7us)*/
-	usec_delay(IXGBE_I2C_T_BUF);
-
-	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
-		i2cctl &= ~bb_en_bit;
-		i2cctl |= data_oe_bit | clk_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-}
-
-/**
- *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
- *  @hw: pointer to hardware structure
- *  @data: data byte to clock in
- *
- *  Clocks in one byte data via I2C data/clock
- **/
-STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
-{
-	s32 i;
-	bool bit = 0;
-
-	DEBUGFUNC("ixgbe_clock_in_i2c_byte");
-
-	*data = 0;
-	for (i = 7; i >= 0; i--) {
-		ixgbe_clock_in_i2c_bit(hw, &bit);
-		*data |= bit << i;
-	}
-
-	return IXGBE_SUCCESS;
-}
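
A standalone sketch of the MSB-first assembly performed above: each sampled SDA bit is placed from bit 7 down to bit 0. The bit stream here is made up for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example bit stream as sampled on SDA, most significant bit first */
	const int bits[8] = { 1, 0, 1, 0, 0, 1, 0, 1 };
	uint8_t data = 0;
	int i;

	for (i = 7; i >= 0; i--)
		data |= (uint8_t)(bits[7 - i] << i);

	printf("assembled byte = 0x%02X\n", data);	/* prints 0xA5 */
	return 0;
}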
-
-/**
- *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
- *  @hw: pointer to hardware structure
- *  @data: data byte clocked out
- *
- *  Clocks out one byte data via I2C data/clock
- **/
-STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
-{
-	s32 status = IXGBE_SUCCESS;
-	s32 i;
-	u32 i2cctl;
-	bool bit;
-
-	DEBUGFUNC("ixgbe_clock_out_i2c_byte");
-
-	for (i = 7; i >= 0; i--) {
-		bit = (data >> i) & 0x1;
-		status = ixgbe_clock_out_i2c_bit(hw, bit);
-
-		if (status != IXGBE_SUCCESS)
-			break;
-	}
-
-	/* Release SDA line (set high) */
-	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
-	i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return status;
-}
-
-/**
- *  ixgbe_get_i2c_ack - Polls for I2C ACK
- *  @hw: pointer to hardware structure
- *
- *  Clocks in/out one bit via I2C data/clock
- **/
-STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
-{
-	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-	s32 status = IXGBE_SUCCESS;
-	u32 i = 0;
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	u32 timeout = 10;
-	bool ack = 1;
-
-	DEBUGFUNC("ixgbe_get_i2c_ack");
-
-	if (data_oe_bit) {
-		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
-		i2cctl |= data_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-	ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-	/* Minimum high period of clock is 4us */
-	usec_delay(IXGBE_I2C_T_HIGH);
-
-	/* Poll for ACK.  Note that ACK in I2C spec is
-	 * transition from 1 to 0 */
-	for (i = 0; i < timeout; i++) {
-		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-		ack = ixgbe_get_i2c_data(hw, &i2cctl);
-
-		usec_delay(1);
-		if (!ack)
-			break;
-	}
-
-	if (ack) {
-		DEBUGOUT("I2C ack was not received.\n");
-		status = IXGBE_ERR_I2C;
-	}
-
-	ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-	/* Minimum low period of clock is 4.7 us */
-	usec_delay(IXGBE_I2C_T_LOW);
-
-	return status;
-}
-
-/**
- *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
- *  @hw: pointer to hardware structure
- *  @data: read data value
- *
- *  Clocks in one bit via I2C data/clock
- **/
-STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
-{
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-
-	DEBUGFUNC("ixgbe_clock_in_i2c_bit");
-
-	if (data_oe_bit) {
-		i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
-		i2cctl |= data_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-	ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-	/* Minimum high period of clock is 4us */
-	usec_delay(IXGBE_I2C_T_HIGH);
-
-	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	*data = ixgbe_get_i2c_data(hw, &i2cctl);
-
-	ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-	/* Minimum low period of clock is 4.7 us */
-	usec_delay(IXGBE_I2C_T_LOW);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
- *  @hw: pointer to hardware structure
- *  @data: data value to write
- *
- *  Clocks out one bit via I2C data/clock
- **/
-STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
-{
-	s32 status;
-	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-
-	DEBUGFUNC("ixgbe_clock_out_i2c_bit");
-
-	status = ixgbe_set_i2c_data(hw, &i2cctl, data);
-	if (status == IXGBE_SUCCESS) {
-		ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-		/* Minimum high period of clock is 4us */
-		usec_delay(IXGBE_I2C_T_HIGH);
-
-		ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-		/* Minimum low period of clock is 4.7 us.
-		 * This also takes care of the data hold time.
-		 */
-		usec_delay(IXGBE_I2C_T_LOW);
-	} else {
-		status = IXGBE_ERR_I2C;
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			     "I2C data was not set to %X\n", data);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Raises the I2C clock line '0'->'1'
- *  Negates the I2C clock output enable on X550 hardware.
- **/
-STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
-	u32 i = 0;
-	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
-	u32 i2cctl_r = 0;
-
-	DEBUGFUNC("ixgbe_raise_i2c_clk");
-
-	if (clk_oe_bit) {
-		*i2cctl |= clk_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-	}
-
-	for (i = 0; i < timeout; i++) {
-		*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
-
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-		/* SCL rise time (1000ns) */
-		usec_delay(IXGBE_I2C_T_RISE);
-
-		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-		if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
-			break;
-	}
-}
-
-/**
- *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Lowers the I2C clock line '1'->'0'
- *  Asserts the I2C clock output enable on X550 hardware.
- **/
-STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-	DEBUGFUNC("ixgbe_lower_i2c_clk");
-
-	*i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
-	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
-
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* SCL fall time (300ns) */
-	usec_delay(IXGBE_I2C_T_FALL);
-}
-
-/**
- *  ixgbe_set_i2c_data - Sets the I2C data bit
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *  @data: I2C data value (0 or 1) to set
- *
- *  Sets the I2C data bit
- *  Asserts the I2C data output enable on X550 hardware.
- **/
-STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
-{
-	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_set_i2c_data");
-
-	if (data)
-		*i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
-	else
-		*i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
-	*i2cctl &= ~data_oe_bit;
-
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
-	usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
-
-	if (!data)	/* Can't verify data in this case */
-		return IXGBE_SUCCESS;
-	if (data_oe_bit) {
-		*i2cctl |= data_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-	}
-
-	/* Verify data was set correctly */
-	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
-		status = IXGBE_ERR_I2C;
-		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
-			     "Error - I2C data was not set to %X.\n",
-			     data);
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_get_i2c_data - Reads the I2C SDA data bit
- *  @hw: pointer to hardware structure
- *  @i2cctl: Current value of I2CCTL register
- *
- *  Returns the I2C data bit value
- *  Negates the I2C data output enable on X550 hardware.
- **/
-STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
-{
-	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
-	bool data;
-
-	DEBUGFUNC("ixgbe_get_i2c_data");
-
-	if (data_oe_bit) {
-		*i2cctl |= data_oe_bit;
-		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
-		IXGBE_WRITE_FLUSH(hw);
-		usec_delay(IXGBE_I2C_T_FALL);
-	}
-
-	if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
-		data = 1;
-	else
-		data = 0;
-
-	return data;
-}
-
-/**
- *  ixgbe_i2c_bus_clear - Clears the I2C bus
- *  @hw: pointer to hardware structure
- *
- *  Clears the I2C bus by sending nine clock pulses.
- *  Used when data line is stuck low.
- **/
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
-{
-	u32 i2cctl;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_i2c_bus_clear");
-
-	ixgbe_i2c_start(hw);
-	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
-
-	ixgbe_set_i2c_data(hw, &i2cctl, 1);
-
-	for (i = 0; i < 9; i++) {
-		ixgbe_raise_i2c_clk(hw, &i2cctl);
-
-		/* Min high period of clock is 4us */
-		usec_delay(IXGBE_I2C_T_HIGH);
-
-		ixgbe_lower_i2c_clk(hw, &i2cctl);
-
-		/* Min low period of clock is 4.7us*/
-		usec_delay(IXGBE_I2C_T_LOW);
-	}
-
-	ixgbe_i2c_start(hw);
-
-	/* Put the i2c bus back to default state */
-	ixgbe_i2c_stop(hw);
-}
-
-/**
- *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
- *  @hw: pointer to hardware structure
- *
- *  Checks if the LASI temp alarm status was triggered due to overtemp
- **/
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	u16 phy_data = 0;
-
-	DEBUGFUNC("ixgbe_tn_check_overtemp");
-
-	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
-		goto out;
-
-	/* Check that the LASI temp alarm status was triggered */
-	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
-			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
-
-	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
-		goto out;
-
-	status = IXGBE_ERR_OVERTEMP;
-	ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
-out:
-	return status;
-}
-
-/**
- * ixgbe_set_copper_phy_power - Control power for copper phy
- * @hw: pointer to hardware structure
- * @on: true for on, false for off
- */
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
-{
-	u32 status;
-	u16 reg;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-	if (status)
-		return status;
-
-	if (on) {
-		reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
-	} else {
-		if (ixgbe_check_reset_blocked(hw))
-			return 0;
-		reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
-	}
-
-	status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				       reg);
-	return status;
-}
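
The power control above is a plain read-modify-write of one PHY register bit: clear the low-power bit to power up, set it to power down (unless a reset is blocked). A minimal sketch on a 16-bit register image; the LOW_POWER_MODE bit position used here is purely illustrative, the real IXGBE_MDIO_PHY_SET_LOW_POWER_MODE value is defined elsewhere in ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define LOW_POWER_MODE	0x0800	/* hypothetical bit position for the example */

static uint16_t set_phy_power(uint16_t reg, int on)
{
	if (on)
		reg &= (uint16_t)~LOW_POWER_MODE;	/* power up */
	else
		reg |= LOW_POWER_MODE;			/* power down */
	return reg;
}

int main(void)
{
	uint16_t reg = 0x2040;	/* example register contents */

	printf("on:  0x%04X\n", set_phy_power(reg, 1));
	printf("off: 0x%04X\n", set_phy_power(reg, 0));
	return 0;
}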
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
deleted file mode 100644
index 21d88f7..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
+++ /dev/null
@@ -1,181 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_PHY_H_
-#define _IXGBE_PHY_H_
-
-#include "ixgbe_type.h"
-#define IXGBE_I2C_EEPROM_DEV_ADDR	0xA0
-#define IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
-#define IXGBE_I2C_EEPROM_BANK_LEN	0xFF
-
-/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER		0x0
-#define IXGBE_SFF_IDENTIFIER_SFP	0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0	0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1	0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2	0x27
-#define IXGBE_SFF_1GBE_COMP_CODES	0x6
-#define IXGBE_SFF_10GBE_COMP_CODES	0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY	0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP	0x3C
-#define IXGBE_SFF_SFF_8472_SWAP		0x5C
-#define IXGBE_SFF_SFF_8472_COMP		0x5E
-#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS	0xD
-#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0	0xA5
-#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1	0xA6
-#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2	0xA7
-#define IXGBE_SFF_QSFP_CONNECTOR	0x82
-#define IXGBE_SFF_QSFP_10GBE_COMP	0x83
-#define IXGBE_SFF_QSFP_1GBE_COMP	0x86
-#define IXGBE_SFF_QSFP_CABLE_LENGTH	0x92
-#define IXGBE_SFF_QSFP_DEVICE_TECH	0x93
-
-/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE	0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE	0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING	0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE	0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE	0x2
-#define IXGBE_SFF_1GBASET_CAPABLE	0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE	0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE	0x20
-#define IXGBE_SFF_ADDRESSING_MODE	0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE	0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE	0x8
-#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE	0x23
-#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL	0x0
-#define IXGBE_I2C_EEPROM_READ_MASK	0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK	0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS	0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL	0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
-
-#define IXGBE_CS4227			0xBE	/* CS4227 address */
-#define IXGBE_CS4227_SPARE24_LSB	0x12B0	/* Reg to program EDC */
-#define IXGBE_CS4227_EDC_MODE_CX1	0x0002
-#define IXGBE_CS4227_EDC_MODE_SR	0x0004
-
-/* Flow control defines */
-#define IXGBE_TAF_SYM_PAUSE		0x400
-#define IXGBE_TAF_ASM_PAUSE		0x800
-
-/* Bit-shift macros */
-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT	24
-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT	16
-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT	8
-
-/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
-#define IXGBE_SFF_VENDOR_OUI_TYCO	0x00407600
-#define IXGBE_SFF_VENDOR_OUI_FTL	0x00906500
-#define IXGBE_SFF_VENDOR_OUI_AVAGO	0x00176A00
-#define IXGBE_SFF_VENDOR_OUI_INTEL	0x001B2100
-
-/* I2C SDA and SCL timing parameters for standard mode */
-#define IXGBE_I2C_T_HD_STA	4
-#define IXGBE_I2C_T_LOW		5
-#define IXGBE_I2C_T_HIGH	4
-#define IXGBE_I2C_T_SU_STA	5
-#define IXGBE_I2C_T_HD_DATA	5
-#define IXGBE_I2C_T_SU_DATA	1
-#define IXGBE_I2C_T_RISE	1
-#define IXGBE_I2C_T_FALL	1
-#define IXGBE_I2C_T_SU_STO	4
-#define IXGBE_I2C_T_BUF		5
-
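
Rough arithmetic on the standard-mode timings above: with T_LOW = 5 us, T_HIGH = 4 us and roughly 1 us each for rise and fall, one SCL period is on the order of 10 us, i.e. the bit-banged bus runs at roughly 100 kHz or slightly below. A small check, assuming those microsecond values:

#include <stdio.h>

int main(void)
{
	const double t_low = 5.0, t_high = 4.0, t_rise = 1.0, t_fall = 1.0;
	double period_us = t_low + t_high + t_rise + t_fall;	/* ~11 us */

	printf("approx SCL period: %.1f us (~%.0f kHz)\n",
	       period_us, 1000.0 / period_us);
	return 0;
}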
-#ifndef IXGBE_SFP_DETECT_RETRIES
-#define IXGBE_SFP_DETECT_RETRIES	10
-
-#endif /* IXGBE_SFP_DETECT_RETRIES */
-#define IXGBE_TN_LASI_STATUS_REG	0x9005
-#define IXGBE_TN_LASI_STATUS_TEMP_ALARM	0x0008
-
-/* SFP+ SFF-8472 Compliance */
-#define IXGBE_SFF_SFF_8472_UNSUP	0x00
-
-#ident "$Id: ixgbe_phy.h,v 1.56 2013/09/05 23:59:49 jtkirshe Exp $"
-
-s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
-bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-			   u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
-			    u16 phy_data);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
-				u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
-				       ixgbe_link_speed speed,
-				       bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
-					       ixgbe_link_speed *speed,
-					       bool *autoneg);
-s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
-
-/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
-			     ixgbe_link_speed *speed,
-			     bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-				       u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-					   u16 *firmware_version);
-
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
-					u16 *list_offset,
-					u16 *data_offset);
-s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				 u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				  u8 *eeprom_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
-				   u8 eeprom_data);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-#endif /* _IXGBE_PHY_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
deleted file mode 100644
index e4432e2..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ /dev/null
@@ -1,3858 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_TYPE_H_
-#define _IXGBE_TYPE_H_
-
-/*
- * The following is a brief description of the error categories used by the
- * ERROR_REPORT* macros.
- *
- * - IXGBE_ERROR_INVALID_STATE
- * This category is for errors which represent a serious failure state that is
- * unexpected, and could be potentially harmful to device operation. It should
- * not be used for errors relating to issues that can be worked around or
- * ignored.
- *
- * - IXGBE_ERROR_POLLING
- * This category is for errors related to polling/timeout issues and should be
- * used in any case where the timeout occurred, or a failure to obtain a lock, or
 * used in any case where the timeout occurred, or a failure to obtain a lock, or
- * failure to receive data within the time limit.
- *
- * - IXGBE_ERROR_CAUTION
- * This category should be used for reporting issues that may be the cause of
- * other errors, such as temperature warnings. It should indicate an event which
- * could be serious, but hasn't necessarily caused problems yet.
- *
- * - IXGBE_ERROR_SOFTWARE
- * This category is intended for errors due to software state preventing
- * something. The category is not intended for errors due to bad arguments, or
- * due to unsupported features. It should be used when a state occurs which
- * prevents action but is not a serious issue.
- *
- * - IXGBE_ERROR_ARGUMENT
- * This category is for when a bad or invalid argument is passed. It should be
- * used whenever a function is called and error checking has detected the
- * argument is wrong or incorrect.
- *
- * - IXGBE_ERROR_UNSUPPORTED
- * This category is for errors which are due to unsupported circumstances or
- * configuration issues. It should not be used when the issue is due to an
- * invalid argument, but for when something has occurred that is unsupported
- * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
- */
-
-#include "ixgbe_osdep.h"
-
-/* Override this by setting IOMEM in your ixgbe_osdep.h header */
-
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID			0x8086
-
-/* Device IDs */
-#define IXGBE_DEV_ID_82598			0x10B6
-#define IXGBE_DEV_ID_82598_BX			0x1508
-#define IXGBE_DEV_ID_82598AF_DUAL_PORT		0x10C6
-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT	0x10C7
-#define IXGBE_DEV_ID_82598AT			0x10C8
-#define IXGBE_DEV_ID_82598AT2			0x150B
-#define IXGBE_DEV_ID_82598EB_SFP_LOM		0x10DB
-#define IXGBE_DEV_ID_82598EB_CX4		0x10DD
-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT	0x10EC
-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT		0x10F1
-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM	0x10E1
-#define IXGBE_DEV_ID_82598EB_XF_LR		0x10F4
-#define IXGBE_DEV_ID_82599_KX4			0x10F7
-#define IXGBE_DEV_ID_82599_KX4_MEZZ		0x1514
-#define IXGBE_DEV_ID_82599_KR			0x1517
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE	0x10F8
-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ	0x000C
-#define IXGBE_DEV_ID_82599_CX4			0x10F9
-#define IXGBE_DEV_ID_82599_SFP			0x10FB
-#define IXGBE_SUBDEV_ID_82599_SFP		0x11A9
-#define IXGBE_SUBDEV_ID_82599_SFP_WOL0		0x1071
-#define IXGBE_SUBDEV_ID_82599_RNDC		0x1F72
-#define IXGBE_SUBDEV_ID_82599_560FLR		0x17D0
-#define IXGBE_SUBDEV_ID_82599_ECNA_DP		0x0470
-#define IXGBE_SUBDEV_ID_82599_SP_560FLR		0x211B
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP		0x8976
-#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6		0x2159
-#define IXGBE_SUBDEV_ID_82599_SFP_1OCP		0x000D
-#define IXGBE_SUBDEV_ID_82599_SFP_2OCP		0x0008
-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE	0x152A
-#define IXGBE_DEV_ID_82599_SFP_FCOE		0x1529
-#define IXGBE_DEV_ID_82599_SFP_EM		0x1507
-#define IXGBE_DEV_ID_82599_SFP_SF2		0x154D
-#define IXGBE_DEV_ID_82599_SFP_SF_QP		0x154A
-#define IXGBE_DEV_ID_82599_QSFP_SF_QP		0x1558
-#define IXGBE_DEV_ID_82599EN_SFP		0x1557
-#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1	0x0001
-#define IXGBE_DEV_ID_82599_XAUI_LOM		0x10FC
-#define IXGBE_DEV_ID_82599_T3_LOM		0x151C
-#define IXGBE_DEV_ID_82599_VF			0x10ED
-#define IXGBE_DEV_ID_82599_VF_HV		0x152E
-#define IXGBE_DEV_ID_82599_LS			0x154F
-#define IXGBE_DEV_ID_X540T			0x1528
-#define IXGBE_DEV_ID_X540_VF			0x1515
-#define IXGBE_DEV_ID_X540_VF_HV			0x1530
-#define IXGBE_DEV_ID_X540T1			0x1560
-#define IXGBE_DEV_ID_X550T			0x1563
-#define IXGBE_DEV_ID_X550EM_X_KX4		0x15AA
-#define IXGBE_DEV_ID_X550EM_X_KR		0x15AB
-#define IXGBE_DEV_ID_X550EM_X_SFP		0x15AC
-#define IXGBE_DEV_ID_X550EM_X_10G_T		0x15AD
-#define IXGBE_DEV_ID_X550EM_X_1G_T		0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV			0x1564
-#define IXGBE_DEV_ID_X550_VF			0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF		0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV		0x15A9
-
-/* General Registers */
-#define IXGBE_CTRL		0x00000
-#define IXGBE_STATUS		0x00008
-#define IXGBE_CTRL_EXT		0x00018
-#define IXGBE_ESDP		0x00020
-#define IXGBE_EODSDP		0x00028
-#define IXGBE_I2CCTL_82599	0x00028
-#define IXGBE_I2CCTL_X550	0x15F5C
-#define IXGBE_I2CCTL_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-				 IXGBE_I2CCTL_X550 : IXGBE_I2CCTL_82599))
-#define IXGBE_PHY_GPIO		0x00028
-#define IXGBE_MAC_GPIO		0x00030
-#define IXGBE_PHYINT_STATUS0	0x00100
-#define IXGBE_PHYINT_STATUS1	0x00104
-#define IXGBE_PHYINT_STATUS2	0x00108
-#define IXGBE_LEDCTL		0x00200
-#define IXGBE_FRTIMER		0x00048
-#define IXGBE_TCPTIMER		0x0004C
-#define IXGBE_CORESPARE		0x00600
-#define IXGBE_EXVET		0x05078
-
-/* NVM Registers */
-#define IXGBE_EEC	0x10010
-#define IXGBE_EERD	0x10014
-#define IXGBE_EEWR	0x10018
-#define IXGBE_FLA	0x1001C
-#define IXGBE_EEMNGCTL	0x10110
-#define IXGBE_EEMNGDATA	0x10114
-#define IXGBE_FLMNGCTL	0x10118
-#define IXGBE_FLMNGDATA	0x1011C
-#define IXGBE_FLMNGCNT	0x10120
-#define IXGBE_FLOP	0x1013C
-#define IXGBE_GRC	0x10200
-#define IXGBE_SRAMREL	0x10210
-#define IXGBE_PHYDBG	0x10218
-
-/* General Receive Control */
-#define IXGBE_GRC_MNG	0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME	0x00000002 /* APM enabled in EEPROM */
-
-#define IXGBE_VPDDIAG0	0x10204
-#define IXGBE_VPDDIAG1	0x10208
-
-/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
-					0x00000400 : 0x00000008)
-#define IXGBE_I2C_BB_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
-				    0x00000100 : 0)
-#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
-					   0x00000800 : 0)
-#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw) ((hw)->mac.type >= ixgbe_mac_X550 ? \
-					  0x00002000 : 0)
-#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
-
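
The *_BY_MAC masks above exist because X550 and later MACs moved the bit-bang I2C control bits to different positions, so every accessor selects the mask from the MAC generation. A small sketch of that selection for the SCL input bit, using the 0x00004000 / 0x00000001 values above; the enum here is only a stand-in for the real ixgbe_mac_type:

#include <stdint.h>
#include <stdio.h>

enum mac_type { MAC_82599 = 0, MAC_X540, MAC_X550 };

static uint32_t i2c_clk_in_mask(enum mac_type mac)
{
	return mac >= MAC_X550 ? 0x00004000 : 0x00000001;
}

int main(void)
{
	printf("82599 SCL-in mask: 0x%08X\n", (unsigned int)i2c_clk_in_mask(MAC_82599));
	printf("X550  SCL-in mask: 0x%08X\n", (unsigned int)i2c_clk_in_mask(MAC_X550));
	return 0;
}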
-#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
-#define IXGBE_EMC_INTERNAL_DATA		0x00
-#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
-#define IXGBE_EMC_DIODE1_DATA		0x01
-#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
-#define IXGBE_EMC_DIODE2_DATA		0x23
-#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
-
-#define IXGBE_MAX_SENSORS		3
-
-struct ixgbe_thermal_diode_data {
-	u8 location;
-	u8 temp;
-	u8 caution_thresh;
-	u8 max_op_thresh;
-};
-
-struct ixgbe_thermal_sensor_data {
-	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
-};
-
-/* Interrupt Registers */
-#define IXGBE_EICR		0x00800
-#define IXGBE_EICS		0x00808
-#define IXGBE_EIMS		0x00880
-#define IXGBE_EIMC		0x00888
-#define IXGBE_EIAC		0x00810
-#define IXGBE_EIAM		0x00890
-#define IXGBE_EICS_EX(_i)	(0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i)	(0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i)	(0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i)	(0x00AD0 + (_i) * 4)
-/* 82599 EITR is only 12 bits, with the lower 3 always zero */
-/*
- * 82598 EITR is 16 bits but set the limits based on the max
- * supported by all ixgbe hardware
- */
-#define IXGBE_MAX_INT_RATE	488281
-#define IXGBE_MIN_INT_RATE	956
-#define IXGBE_MAX_EITR		0x00000FF8
-#define IXGBE_MIN_EITR		8
-#define IXGBE_EITR(_i)		(((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
-				 (0x012300 + (((_i) - 24) * 4)))
-#define IXGBE_EITR_ITR_INT_MASK	0x00000FF8
-#define IXGBE_EITR_LLI_MOD	0x00008000
-#define IXGBE_EITR_CNT_WDIS	0x80000000
-#define IXGBE_IVAR(_i)		(0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
-#define IXGBE_IVAR_MISC		0x00A00 /* misc MSI-X interrupt causes */
-#define IXGBE_EITRSEL		0x00894
-#define IXGBE_MSIXT		0x00000 /* MSI-X Table. 0x0000 - 0x01C */
-#define IXGBE_MSIXPBA		0x02000 /* MSI-X Pending bit array */
-#define IXGBE_PBACL(_i)	(((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
-#define IXGBE_GPIE		0x00898
-
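
As a hedged sanity check on the interrupt throttling limits above: 488281 interrupts/s corresponds to an interval of about 2 us and 956 interrupts/s to about 1046 us. The exact EITR field encoding is hardware specific and not derived here; this only converts the rate limits to microsecond intervals:

#include <stdio.h>

int main(void)
{
	const double max_rate = 488281.0, min_rate = 956.0;

	printf("min interval: %.2f us\n", 1e6 / max_rate);	/* ~2.05 us */
	printf("max interval: %.2f us\n", 1e6 / min_rate);	/* ~1046 us */
	return 0;
}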
-/* Flow Control Registers */
-#define IXGBE_FCADBUL		0x03210
-#define IXGBE_FCADBUH		0x03214
-#define IXGBE_FCAMACL		0x04328
-#define IXGBE_FCAMACH		0x0432C
-#define IXGBE_FCRTH_82599(_i)	(0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_FCRTL_82599(_i)	(0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_PFCTOP		0x03008
-#define IXGBE_FCTTV(_i)		(0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_FCRTL(_i)		(0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTH(_i)		(0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
-#define IXGBE_FCRTV		0x032A0
-#define IXGBE_FCCFG		0x03D00
-#define IXGBE_TFCS		0x0CE00
-
-/* Receive DMA Registers */
-#define IXGBE_RDBAL(_i)	(((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
-			 (0x0D000 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDBAH(_i)	(((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
-			 (0x0D004 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDLEN(_i)	(((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
-			 (0x0D008 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDH(_i)	(((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
-			 (0x0D010 + (((_i) - 64) * 0x40)))
-#define IXGBE_RDT(_i)	(((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
-			 (0x0D018 + (((_i) - 64) * 0x40)))
-#define IXGBE_RXDCTL(_i)	(((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
-				 (0x0D028 + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCCTL(_i)	(((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
-				 (0x0D02C + (((_i) - 64) * 0x40)))
-#define IXGBE_RSCDBU	0x03028
-#define IXGBE_RDDCC	0x02F20
-#define IXGBE_RXMEMWRAP	0x03190
-#define IXGBE_STARCTRL	0x03024
-/*
- * Split and Replication Receive Control Registers
- * 00-15 : 0x02100 + n*4
- * 16-64 : 0x01014 + n*0x40
- * 64-127: 0x0D014 + (n-64)*0x40
- */
-#define IXGBE_SRRCTL(_i)	(((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
-				 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
-				 (0x0D014 + (((_i) - 64) * 0x40))))
-/*
- * Rx DCA Control Register:
- * 00-15 : 0x02200 + n*4
- * 16-64 : 0x0100C + n*0x40
- * 64-127: 0x0D00C + (n-64)*0x40
- */
-#define IXGBE_DCA_RXCTRL(_i)	(((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
-				 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
-				 (0x0D00C + (((_i) - 64) * 0x40))))
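
Worked example of the queue-indexed layout documented in the two comment blocks above: SRRCTL for queues 0-15 sits at 0x02100 + 4*n, queues 16-63 at 0x01014 + 0x40*n, and queues 64-127 at 0x0D014 + 0x40*(n-64). A standalone address calculator (the queue numbers printed are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

static uint32_t srrctl_addr(unsigned int q)
{
	if (q <= 15)
		return 0x02100 + q * 4;
	if (q < 64)
		return 0x01014 + q * 0x40;
	return 0x0D014 + (q - 64) * 0x40;
}

int main(void)
{
	unsigned int queues[] = { 5, 20, 100 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("SRRCTL(%u) = 0x%05X\n", queues[i],
		       (unsigned int)srrctl_addr(queues[i]));
	return 0;
}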
-#define IXGBE_RDRXCTL		0x02F00
-/* 8 of these 0x03C00 - 0x03C1C */
-#define IXGBE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
-#define IXGBE_RXCTRL		0x03000
-#define IXGBE_DROPEN		0x03D04
-#define IXGBE_RXPBSIZE_SHIFT	10
-#define IXGBE_RXPBSIZE_MASK	0x000FFC00
-
-/* Receive Registers */
-#define IXGBE_RXCSUM		0x05000
-#define IXGBE_RFCTL		0x05008
-#define IXGBE_DRECCCTL		0x02F08
-#define IXGBE_DRECCCTL_DISABLE	0
-#define IXGBE_DRECCCTL2		0x02F8C
-
-/* Multicast Table Array - 128 entries */
-#define IXGBE_MTA(_i)		(0x05200 + ((_i) * 4))
-#define IXGBE_RAL(_i)		(((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-				 (0x0A200 + ((_i) * 8)))
-#define IXGBE_RAH(_i)		(((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-				 (0x0A204 + ((_i) * 8)))
-#define IXGBE_MPSAR_LO(_i)	(0x0A600 + ((_i) * 8))
-#define IXGBE_MPSAR_HI(_i)	(0x0A604 + ((_i) * 8))
-/* Packet split receive type */
-#define IXGBE_PSRTYPE(_i)	(((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
-				 (0x0EA00 + ((_i) * 4)))
-/* array of 4096 1-bit vlan filters */
-#define IXGBE_VFTA(_i)		(0x0A000 + ((_i) * 4))
-/*array of 4096 4-bit vlan vmdq indices */
-#define IXGBE_VFTAVIND(_j, _i)	(0x0A200 + ((_j) * 0x200) + ((_i) * 4))
-#define IXGBE_FCTRL		0x05080
-#define IXGBE_VLNCTRL		0x05088
-#define IXGBE_MCSTCTRL		0x05090
-#define IXGBE_MRQC		0x05818
-#define IXGBE_SAQF(_i)	(0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
-#define IXGBE_DAQF(_i)	(0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
-#define IXGBE_SDPQF(_i)	(0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
-#define IXGBE_FTQF(_i)	(0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
-#define IXGBE_ETQF(_i)	(0x05128 + ((_i) * 4)) /* EType Queue Filter */
-#define IXGBE_ETQS(_i)	(0x0EC00 + ((_i) * 4)) /* EType Queue Select */
-#define IXGBE_SYNQF	0x0EC30 /* SYN Packet Queue Filter */
-#define IXGBE_RQTC	0x0EC70
-#define IXGBE_MTQC	0x08120
-#define IXGBE_VLVF(_i)	(0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
-#define IXGBE_VLVFB(_i)	(0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
-#define IXGBE_VMVIR(_i)	(0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
-#define IXGBE_PFFLPL		0x050B0
-#define IXGBE_PFFLPH		0x050B4
-#define IXGBE_VT_CTL		0x051B0
-#define IXGBE_PFMAILBOX(_i)	(0x04B00 + (4 * (_i))) /* 64 total */
-/* 64 Mailboxes, 16 DW each */
-#define IXGBE_PFMBMEM(_i)	(0x13000 + (64 * (_i)))
-#define IXGBE_PFMBICR(_i)	(0x00710 + (4 * (_i))) /* 4 total */
-#define IXGBE_PFMBIMR(_i)	(0x00720 + (4 * (_i))) /* 4 total */
-#define IXGBE_VFRE(_i)		(0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i)		(0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i)		(0x08790 + ((_i) * 4))
-#define IXGBE_QDE		0x2F04
-#define IXGBE_VMTXSW(_i)	(0x05180 + ((_i) * 4)) /* 2 total */
-#define IXGBE_VMOLR(_i)		(0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i)		(0x0F400 + ((_i) * 4))
-#define IXGBE_MRCTL(_i)		(0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i)	(0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i)		(0x0F630 + ((_i) * 4))
-#define IXGBE_LVMMC_RX		0x2FA8
-#define IXGBE_LVMMC_TX		0x8108
-#define IXGBE_LMVM_RX		0x2FA4
-#define IXGBE_LMVM_TX		0x8124
-#define IXGBE_WQBR_RX(_i)	(0x2FB0 + ((_i) * 4)) /* 4 total */
-#define IXGBE_WQBR_TX(_i)	(0x8130 + ((_i) * 4)) /* 4 total */
-#define IXGBE_L34T_IMIR(_i)	(0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
-#define IXGBE_RXFECCERR0	0x051B8
-#define IXGBE_LLITHRESH		0x0EC90
-#define IXGBE_IMIR(_i)		(0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
-#define IXGBE_IMIREXT(_i)	(0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
-#define IXGBE_IMIRVP		0x05AC0
-#define IXGBE_VMD_CTL		0x0581C
-#define IXGBE_RETA(_i)		(0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
-#define IXGBE_ERETA(_i)		(0x0EE80 + ((_i) * 4))  /* 96 of these (0-95) */
-#define IXGBE_RSSRK(_i)		(0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
-
-/* Registers for setting up RSS on X550 with SRIOV
- * _p - pool number (0..63)
- * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
- */
-#define IXGBE_PFVFMRQC(_p)	(0x03400 + ((_p) * 4))
-#define IXGBE_PFVFRSSRK(_i, _p)	(0x018000 + ((_i) * 4) + ((_p) * 0x40))
-#define IXGBE_PFVFRETA(_i, _p)	(0x019000 + ((_i) * 4) + ((_p) * 0x40))
-
-/* Flow Director registers */
-#define IXGBE_FDIRCTRL	0x0EE00
-#define IXGBE_FDIRHKEY	0x0EE68
-#define IXGBE_FDIRSKEY	0x0EE6C
-#define IXGBE_FDIRDIP4M	0x0EE3C
-#define IXGBE_FDIRSIP4M	0x0EE40
-#define IXGBE_FDIRTCPM	0x0EE44
-#define IXGBE_FDIRUDPM	0x0EE48
-#define IXGBE_FDIRSCTPM	0x0EE78
-#define IXGBE_FDIRIP6M	0x0EE74
-#define IXGBE_FDIRM	0x0EE70
-
-/* Flow Director Stats registers */
-#define IXGBE_FDIRFREE	0x0EE38
-#define IXGBE_FDIRLEN	0x0EE4C
-#define IXGBE_FDIRUSTAT	0x0EE50
-#define IXGBE_FDIRFSTAT	0x0EE54
-#define IXGBE_FDIRMATCH	0x0EE58
-#define IXGBE_FDIRMISS	0x0EE5C
-
-/* Flow Director Programming registers */
-#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
-#define IXGBE_FDIRIPSA	0x0EE18
-#define IXGBE_FDIRIPDA	0x0EE1C
-#define IXGBE_FDIRPORT	0x0EE20
-#define IXGBE_FDIRVLAN	0x0EE24
-#define IXGBE_FDIRHASH	0x0EE28
-#define IXGBE_FDIRCMD	0x0EE2C
-
-/* Transmit DMA registers */
-#define IXGBE_TDBAL(_i)		(0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/
-#define IXGBE_TDBAH(_i)		(0x06004 + ((_i) * 0x40))
-#define IXGBE_TDLEN(_i)		(0x06008 + ((_i) * 0x40))
-#define IXGBE_TDH(_i)		(0x06010 + ((_i) * 0x40))
-#define IXGBE_TDT(_i)		(0x06018 + ((_i) * 0x40))
-#define IXGBE_TXDCTL(_i)	(0x06028 + ((_i) * 0x40))
-#define IXGBE_TDWBAL(_i)	(0x06038 + ((_i) * 0x40))
-#define IXGBE_TDWBAH(_i)	(0x0603C + ((_i) * 0x40))
-#define IXGBE_DTXCTL		0x07E00
-
-#define IXGBE_DMATXCTL		0x04A80
-#define IXGBE_PFVFSPOOF(_i)	(0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
-#define IXGBE_PFDTXGSWC		0x08220
-#define IXGBE_DTXMXSZRQ		0x08100
-#define IXGBE_DTXTCPFLGL	0x04A88
-#define IXGBE_DTXTCPFLGH	0x04A8C
-#define IXGBE_LBDRPEN		0x0CA00
-#define IXGBE_TXPBTHRESH(_i)	(0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
-
-#define IXGBE_DMATXCTL_TE	0x1 /* Transmit Enable */
-#define IXGBE_DMATXCTL_NS	0x2 /* No Snoop LSO hdr buffer */
-#define IXGBE_DMATXCTL_GDV	0x8 /* Global Double VLAN */
-#define IXGBE_DMATXCTL_MDP_EN	0x20 /* Bit 5 */
-#define IXGBE_DMATXCTL_MBINTEN	0x40 /* Bit 6 */
-#define IXGBE_DMATXCTL_VT_SHIFT	16  /* VLAN EtherType */
-
-#define IXGBE_PFDTXGSWC_VT_LBEN	0x1 /* Local L2 VT switch enable */
-
-/* Anti-spoofing defines */
-#define IXGBE_SPOOF_MACAS_MASK		0xFF
-#define IXGBE_SPOOF_VLANAS_MASK		0xFF00
-#define IXGBE_SPOOF_VLANAS_SHIFT	8
-#define IXGBE_SPOOF_ETHERTYPEAS		0xFF000000
-#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT	16
-#define IXGBE_PFVFSPOOF_REG_COUNT	8
-/* 16 of these (0-15) */
-#define IXGBE_DCA_TXCTRL(_i)		(0x07200 + ((_i) * 4))
-/* Tx DCA Control register : 128 of these (0-127) */
-#define IXGBE_DCA_TXCTRL_82599(_i)	(0x0600C + ((_i) * 0x40))
-#define IXGBE_TIPG			0x0CB00
-#define IXGBE_TXPBSIZE(_i)		(0x0CC00 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_MNGTXMAP			0x0CD10
-#define IXGBE_TIPG_FIBER_DEFAULT	3
-#define IXGBE_TXPBSIZE_SHIFT		10
-
-/* Wake up registers */
-#define IXGBE_WUC	0x05800
-#define IXGBE_WUFC	0x05808
-#define IXGBE_WUS	0x05810
-#define IXGBE_IPAV	0x05838
-#define IXGBE_IP4AT	0x05840 /* IPv4 table 0x5840-0x5858 */
-#define IXGBE_IP6AT	0x05880 /* IPv6 table 0x5880-0x588F */
-
-#define IXGBE_WUPL	0x05900
-#define IXGBE_WUPM	0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_PROXYS	0x05F60 /* Proxying Status Register */
-#define IXGBE_PROXYFC	0x05F64 /* Proxying Filter Control Register */
-#define IXGBE_VXLANCTRL	0x0000507C /* Rx filter VXLAN UDPPORT Register */
-
-#define IXGBE_FHFT(_n)	(0x09000 + ((_n) * 0x100)) /* Flex host filter table */
-/* Ext Flexible Host Filter Table */
-#define IXGBE_FHFT_EXT(_n)	(0x09800 + ((_n) * 0x100))
-#define IXGBE_FHFT_EXT_X550(_n)	(0x09600 + ((_n) * 0x100))
-
-/* Four Flexible Filters are supported */
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX		4
-
-/* Six Flexible Filters are supported */
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6	6
-/* Eight Flexible Filters are supported */
-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8	8
-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX	2
-
-/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX		128
-#define IXGBE_FHFT_LENGTH_OFFSET		0xFC  /* Length byte in FHFT */
-#define IXGBE_FHFT_LENGTH_MASK			0x0FF /* Length in lower byte */
-
-/* Definitions for power management and wakeup registers */
-/* Wake Up Control */
-#define IXGBE_WUC_PME_EN	0x00000002 /* PME Enable */
-#define IXGBE_WUC_PME_STATUS	0x00000004 /* PME Status */
-#define IXGBE_WUC_WKEN		0x00000010 /* Enable PE_WAKE_N pin assertion  */
-
-/* Wake Up Filter Control */
-#define IXGBE_WUFC_LNKC	0x00000001 /* Link Status Change Wakeup Enable */
-#define IXGBE_WUFC_MAG	0x00000002 /* Magic Packet Wakeup Enable */
-#define IXGBE_WUFC_EX	0x00000004 /* Directed Exact Wakeup Enable */
-#define IXGBE_WUFC_MC	0x00000008 /* Directed Multicast Wakeup Enable */
-#define IXGBE_WUFC_BC	0x00000010 /* Broadcast Wakeup Enable */
-#define IXGBE_WUFC_ARP	0x00000020 /* ARP Request Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV4	0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define IXGBE_WUFC_IPV6	0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define IXGBE_WUFC_MNG	0x00000100 /* Directed Mgmt Packet Wakeup Enable */
-
-#define IXGBE_WUFC_IGNORE_TCO	0x00008000 /* Ignore WakeOn TCO packets */
-#define IXGBE_WUFC_FLX0	0x00010000 /* Flexible Filter 0 Enable */
-#define IXGBE_WUFC_FLX1	0x00020000 /* Flexible Filter 1 Enable */
-#define IXGBE_WUFC_FLX2	0x00040000 /* Flexible Filter 2 Enable */
-#define IXGBE_WUFC_FLX3	0x00080000 /* Flexible Filter 3 Enable */
-#define IXGBE_WUFC_FLX4	0x00100000 /* Flexible Filter 4 Enable */
-#define IXGBE_WUFC_FLX5	0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS		0x000F0000 /* Mask for 4 flex filters */
-#define IXGBE_WUFC_FLX_FILTERS_6	0x003F0000 /* Mask for 6 flex filters */
-#define IXGBE_WUFC_FLX_FILTERS_8	0x00FF0000 /* Mask for 8 flex filters */
-#define IXGBE_WUFC_FW_RST_WK	0x80000000 /* Ena wake on FW reset assertion */
-/* Mask for Ext. flex filters */
-#define IXGBE_WUFC_EXT_FLX_FILTERS	0x00300000
-#define IXGBE_WUFC_ALL_FILTERS		0x000F00FF /* Mask all 4 flex filters */
-#define IXGBE_WUFC_ALL_FILTERS_6	0x003F00FF /* Mask all 6 flex filters */
-#define IXGBE_WUFC_ALL_FILTERS_8	0x00FF00FF /* Mask all 8 flex filters */
-#define IXGBE_WUFC_FLX_OFFSET	16 /* Offset to the Flexible Filters bits */
-
-/* Wake Up Status */
-#define IXGBE_WUS_LNKC		IXGBE_WUFC_LNKC
-#define IXGBE_WUS_MAG		IXGBE_WUFC_MAG
-#define IXGBE_WUS_EX		IXGBE_WUFC_EX
-#define IXGBE_WUS_MC		IXGBE_WUFC_MC
-#define IXGBE_WUS_BC		IXGBE_WUFC_BC
-#define IXGBE_WUS_ARP		IXGBE_WUFC_ARP
-#define IXGBE_WUS_IPV4		IXGBE_WUFC_IPV4
-#define IXGBE_WUS_IPV6		IXGBE_WUFC_IPV6
-#define IXGBE_WUS_MNG		IXGBE_WUFC_MNG
-#define IXGBE_WUS_FLX0		IXGBE_WUFC_FLX0
-#define IXGBE_WUS_FLX1		IXGBE_WUFC_FLX1
-#define IXGBE_WUS_FLX2		IXGBE_WUFC_FLX2
-#define IXGBE_WUS_FLX3		IXGBE_WUFC_FLX3
-#define IXGBE_WUS_FLX4		IXGBE_WUFC_FLX4
-#define IXGBE_WUS_FLX5		IXGBE_WUFC_FLX5
-#define IXGBE_WUS_FLX_FILTERS	IXGBE_WUFC_FLX_FILTERS
-#define IXGBE_WUS_FW_RST_WK	IXGBE_WUFC_FW_RST_WK
-/* Proxy Status */
-#define IXGBE_PROXYS_EX		0x00000004 /* Exact packet received */
-#define IXGBE_PROXYS_ARP_DIR	0x00000020 /* ARP w/filter match received */
-#define IXGBE_PROXYS_NS		0x00000200 /* IPV6 NS received */
-#define IXGBE_PROXYS_NS_DIR	0x00000400 /* IPV6 NS w/DA match received */
-#define IXGBE_PROXYS_ARP	0x00000800 /* ARP request packet received */
-#define IXGBE_PROXYS_MLD	0x00001000 /* IPv6 MLD packet received */
-
-/* Proxying Filter Control */
-#define IXGBE_PROXYFC_ENABLE	0x00000001 /* Port Proxying Enable */
-#define IXGBE_PROXYFC_EX	0x00000004 /* Directed Exact Proxy Enable */
-#define IXGBE_PROXYFC_ARP_DIR	0x00000020 /* Directed ARP Proxy Enable */
-#define IXGBE_PROXYFC_NS	0x00000200 /* IPv6 Neighbor Solicitation */
-#define IXGBE_PROXYFC_ARP	0x00000800 /* ARP Request Proxy Enable */
-#define IXGBE_PROXYFC_MLD	0x00000800 /* IPv6 MLD Proxy Enable */
-#define IXGBE_PROXYFC_NO_TCO	0x00008000 /* Ignore TCO packets */
-
-#define IXGBE_WUPL_LENGTH_MASK	0xFFFF
-
-/* DCB registers */
-#define IXGBE_DCB_MAX_TRAFFIC_CLASS	8
-#define IXGBE_RMCS		0x03D00
-#define IXGBE_DPMCS		0x07F40
-#define IXGBE_PDPMCS		0x0CD00
-#define IXGBE_RUPPBMR		0x050A0
-#define IXGBE_RT2CR(_i)		(0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RT2SR(_i)		(0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCCR(_i)	(0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDTQ2TCSR(_i)	(0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCCR(_i)	(0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TDPT2TCSR(_i)	(0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
-/* Power Management */
-/* DMA Coalescing configuration */
-struct ixgbe_dmac_config {
-	u16	watchdog_timer; /* usec units */
-	bool	fcoe_en;
-	u32	link_speed;
-	u8	fcoe_tc;
-	u8	num_tcs;
-};
-
-/*
- * DMA Coalescing threshold Rx PB TC[n] value in Kilobytes, by link speed.
- * At 10Gb/s: 10,000 bits/usec = 1250 bytes/usec; 70 usec * 1250 bytes/usec
- * == 87500 bytes [~85KB]
- */
-#define IXGBE_DMACRXT_10G		0x55
-#define IXGBE_DMACRXT_1G		0x09
-#define IXGBE_DMACRXT_100M		0x01
-
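
Worked check of the threshold comment above: at 10 Gb/s the link moves 1250 bytes per microsecond, so a roughly 70 us coalescing window is about 87500 bytes, i.e. approximately 85 KB, which lines up with IXGBE_DMACRXT_10G = 0x55:

#include <stdio.h>

int main(void)
{
	const double bytes_per_us = 10000.0 / 8.0;	/* 10 Gb/s -> 1250 B/us */
	double bytes = 70.0 * bytes_per_us;		/* 87500 bytes */

	printf("%.0f bytes ~= %.0f KB (0x%X)\n",
	       bytes, bytes / 1024.0, (unsigned int)(bytes / 1024.0));
	return 0;
}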
-/* DMA Coalescing registers */
-#define IXGBE_DMCMNGTH			0x15F20 /* Management Threshold */
-#define IXGBE_DMACR			0x02400 /* Control register */
-#define IXGBE_DMCTH(_i)			(0x03300 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_DMCTLX			0x02404 /* Time to Lx request */
-/* DMA Coalescing register fields */
-#define IXGBE_DMCMNGTH_DMCMNGTH_MASK	0x000FFFF0 /* Mng Threshold mask */
-#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT	4 /* Management Threshold shift */
-#define IXGBE_DMACR_DMACWT_MASK		0x0000FFFF /* Watchdog Timer mask */
-#define IXGBE_DMACR_HIGH_PRI_TC_MASK	0x00FF0000
-#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT	16
-#define IXGBE_DMACR_EN_MNG_IND		0x10000000 /* Enable Mng Indications */
-#define IXGBE_DMACR_LX_COAL_IND		0x40000000 /* Lx Coalescing indicate */
-#define IXGBE_DMACR_DMAC_EN		0x80000000 /* DMA Coalescing Enable */
-#define IXGBE_DMCTH_DMACRXT_MASK	0x000001FF /* Receive Threshold mask */
-#define IXGBE_DMCTLX_TTLX_MASK		0x00000FFF /* Time to Lx request mask */
-
-/* EEE registers */
-#define IXGBE_EEER			0x043A0 /* EEE register */
-#define IXGBE_EEE_STAT			0x04398 /* EEE Status */
-#define IXGBE_EEE_SU			0x04380 /* EEE Set up */
-#define IXGBE_TLPIC			0x041F4 /* EEE Tx LPI count */
-#define IXGBE_RLPIC			0x041F8 /* EEE Rx LPI count */
-
-/* EEE register fields */
-#define IXGBE_EEER_TX_LPI_EN		0x00010000 /* Enable EEE LPI TX path */
-#define IXGBE_EEER_RX_LPI_EN		0x00020000 /* Enable EEE LPI RX path */
-#define IXGBE_EEE_STAT_NEG		0x20000000 /* EEE support neg on link */
-#define IXGBE_EEE_RX_LPI_STATUS		0x40000000 /* RX Link in LPI status */
-#define IXGBE_EEE_TX_LPI_STATUS		0x80000000 /* TX Link in LPI status */
-
-
-
-/* Security Control Registers */
-#define IXGBE_SECTXCTRL		0x08800
-#define IXGBE_SECTXSTAT		0x08804
-#define IXGBE_SECTXBUFFAF	0x08808
-#define IXGBE_SECTXMINIFG	0x08810
-#define IXGBE_SECRXCTRL		0x08D00
-#define IXGBE_SECRXSTAT		0x08D04
-
-/* Security Bit Fields and Masks */
-#define IXGBE_SECTXCTRL_SECTX_DIS	0x00000001
-#define IXGBE_SECTXCTRL_TX_DIS		0x00000002
-#define IXGBE_SECTXCTRL_STORE_FORWARD	0x00000004
-
-#define IXGBE_SECTXSTAT_SECTX_RDY	0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR	0x00000002
-
-#define IXGBE_SECRXCTRL_SECRX_DIS	0x00000001
-#define IXGBE_SECRXCTRL_RX_DIS		0x00000002
-
-#define IXGBE_SECRXSTAT_SECRX_RDY	0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR	0x00000002
-
-/* LinkSec (MacSec) Registers */
-#define IXGBE_LSECTXCAP		0x08A00
-#define IXGBE_LSECRXCAP		0x08F00
-#define IXGBE_LSECTXCTRL	0x08A04
-#define IXGBE_LSECTXSCL		0x08A08 /* SCI Low */
-#define IXGBE_LSECTXSCH		0x08A0C /* SCI High */
-#define IXGBE_LSECTXSA		0x08A10
-#define IXGBE_LSECTXPN0		0x08A14
-#define IXGBE_LSECTXPN1		0x08A18
-#define IXGBE_LSECTXKEY0(_n)	(0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECTXKEY1(_n)	(0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
-#define IXGBE_LSECRXCTRL	0x08F04
-#define IXGBE_LSECRXSCL		0x08F08
-#define IXGBE_LSECRXSCH		0x08F0C
-#define IXGBE_LSECRXSA(_i)	(0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXPN(_i)	(0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
-#define IXGBE_LSECRXKEY(_n, _m)	(0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
-#define IXGBE_LSECTXUT		0x08A3C /* OutPktsUntagged */
-#define IXGBE_LSECTXPKTE	0x08A40 /* OutPktsEncrypted */
-#define IXGBE_LSECTXPKTP	0x08A44 /* OutPktsProtected */
-#define IXGBE_LSECTXOCTE	0x08A48 /* OutOctetsEncrypted */
-#define IXGBE_LSECTXOCTP	0x08A4C /* OutOctetsProtected */
-#define IXGBE_LSECRXUT		0x08F40 /* InPktsUntagged/InPktsNoTag */
-#define IXGBE_LSECRXOCTD	0x08F44 /* InOctetsDecrypted */
-#define IXGBE_LSECRXOCTV	0x08F48 /* InOctetsValidated */
-#define IXGBE_LSECRXBAD		0x08F4C /* InPktsBadTag */
-#define IXGBE_LSECRXNOSCI	0x08F50 /* InPktsNoSci */
-#define IXGBE_LSECRXUNSCI	0x08F54 /* InPktsUnknownSci */
-#define IXGBE_LSECRXUNCH	0x08F58 /* InPktsUnchecked */
-#define IXGBE_LSECRXDELAY	0x08F5C /* InPktsDelayed */
-#define IXGBE_LSECRXLATE	0x08F60 /* InPktsLate */
-#define IXGBE_LSECRXOK(_n)	(0x08F64 + (0x04 * (_n))) /* InPktsOk */
-#define IXGBE_LSECRXINV(_n)	(0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
-#define IXGBE_LSECRXNV(_n)	(0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
-#define IXGBE_LSECRXUNSA	0x08F7C /* InPktsUnusedSa */
-#define IXGBE_LSECRXNUSA	0x08F80 /* InPktsNotUsingSa */
-
-/* LinkSec (MacSec) Bit Fields and Masks */
-#define IXGBE_LSECTXCAP_SUM_MASK	0x00FF0000
-#define IXGBE_LSECTXCAP_SUM_SHIFT	16
-#define IXGBE_LSECRXCAP_SUM_MASK	0x00FF0000
-#define IXGBE_LSECRXCAP_SUM_SHIFT	16
-
-#define IXGBE_LSECTXCTRL_EN_MASK	0x00000003
-#define IXGBE_LSECTXCTRL_DISABLE	0x0
-#define IXGBE_LSECTXCTRL_AUTH		0x1
-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT	0x2
-#define IXGBE_LSECTXCTRL_AISCI		0x00000020
-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK	0xFFFFFF00
-#define IXGBE_LSECTXCTRL_RSV_MASK	0x000000D8
-
-#define IXGBE_LSECRXCTRL_EN_MASK	0x0000000C
-#define IXGBE_LSECRXCTRL_EN_SHIFT	2
-#define IXGBE_LSECRXCTRL_DISABLE	0x0
-#define IXGBE_LSECRXCTRL_CHECK		0x1
-#define IXGBE_LSECRXCTRL_STRICT		0x2
-#define IXGBE_LSECRXCTRL_DROP		0x3
-#define IXGBE_LSECRXCTRL_PLSH		0x00000040
-#define IXGBE_LSECRXCTRL_RP		0x00000080
-#define IXGBE_LSECRXCTRL_RSV_MASK	0xFFFFFF33
-
-/* IpSec Registers */
-#define IXGBE_IPSTXIDX		0x08900
-#define IXGBE_IPSTXSALT		0x08904
-#define IXGBE_IPSTXKEY(_i)	(0x08908 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXIDX		0x08E00
-#define IXGBE_IPSRXIPADDR(_i)	(0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSPI		0x08E14
-#define IXGBE_IPSRXIPIDX	0x08E18
-#define IXGBE_IPSRXKEY(_i)	(0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
-#define IXGBE_IPSRXSALT		0x08E2C
-#define IXGBE_IPSRXMOD		0x08E30
-
-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE	0x4
-
-/* DCB registers */
-#define IXGBE_RTRPCS		0x02430
-#define IXGBE_RTTDCS		0x04900
-#define IXGBE_RTTDCS_ARBDIS	0x00000040 /* DCB arbiter disable */
-#define IXGBE_RTTPCS		0x0CD00
-#define IXGBE_RTRUP2TC		0x03020
-#define IXGBE_RTTUP2TC		0x0C800
-#define IXGBE_RTRPT4C(_i)	(0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_TXLLQ(_i)		(0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_RTRPT4S(_i)	(0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2C(_i)	(0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDT2S(_i)	(0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2C(_i)	(0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTPT2S(_i)	(0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_RTTDQSEL		0x04904
-#define IXGBE_RTTDT1C		0x04908
-#define IXGBE_RTTDT1S		0x0490C
-#define IXGBE_RTTDTECC		0x04990
-#define IXGBE_RTTDTECC_NO_BCN	0x00000100
-
-#define IXGBE_RTTBCNRC			0x04984
-#define IXGBE_RTTBCNRC_RS_ENA		0x80000000
-#define IXGBE_RTTBCNRC_RF_DEC_MASK	0x00003FFF
-#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
-#define IXGBE_RTTBCNRC_RF_INT_MASK \
-	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-#define IXGBE_RTTBCNRM	0x04980
-
-/* BCN (for DCB) Registers */
-#define IXGBE_RTTBCNRS	0x04988
-#define IXGBE_RTTBCNCR	0x08B00
-#define IXGBE_RTTBCNACH	0x08B04
-#define IXGBE_RTTBCNACL	0x08B08
-#define IXGBE_RTTBCNTG	0x04A90
-#define IXGBE_RTTBCNIDX	0x08B0C
-#define IXGBE_RTTBCNCP	0x08B10
-#define IXGBE_RTFRTIMER	0x08B14
-#define IXGBE_RTTBCNRTT	0x05150
-#define IXGBE_RTTBCNRD	0x0498C
-
-
-/* FCoE DMA Context Registers */
-/* FCoE Direct DMA Context */
-#define IXGBE_FCDDC(_i, _j)	(0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
-#define IXGBE_FCPTRL		0x02410 /* FC User Desc. PTR Low */
-#define IXGBE_FCPTRH		0x02414 /* FC User Desc. PTR High */
-#define IXGBE_FCBUFF		0x02418 /* FC Buffer Control */
-#define IXGBE_FCDMARW		0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCBUFF_VALID	(1 << 0)   /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE	(3 << 3)   /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX	(1 << 7)   /* 0: Initiator, 1: Target */
-#define IXGBE_FCBUFF_BUFFCNT	0x0000ff00 /* Number of User Buffers */
-#define IXGBE_FCBUFF_OFFSET	0xffff0000 /* User Buffer Offset */
-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT	3
-#define IXGBE_FCBUFF_BUFFCNT_SHIFT	8
-#define IXGBE_FCBUFF_OFFSET_SHIFT	16
-#define IXGBE_FCDMARW_WE		(1 << 14)   /* Write enable */
-#define IXGBE_FCDMARW_RE		(1 << 15)   /* Read enable */
-#define IXGBE_FCDMARW_FCOESEL		0x000001ff  /* FC X_ID: 9-bit field */
-#define IXGBE_FCDMARW_LASTSIZE		0xffff0000  /* Last User Buffer Size */
-#define IXGBE_FCDMARW_LASTSIZE_SHIFT	16
-/* FCoE SOF/EOF */
-#define IXGBE_TEOFF		0x04A94 /* Tx FC EOF */
-#define IXGBE_TSOFF		0x04A98 /* Tx FC SOF */
-#define IXGBE_REOFF		0x05158 /* Rx FC EOF */
-#define IXGBE_RSOFF		0x051F8 /* Rx FC SOF */
-/* FCoE Filter Context Registers */
-#define IXGBE_FCD_ID		0x05114 /* FCoE D_ID */
-#define IXGBE_FCSMAC		0x0510C /* FCoE Source MAC */
-#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT	16
-/* FCoE Direct Filter Context */
-#define IXGBE_FCDFC(_i, _j)	(0x28000 + ((_i) * 0x4) + ((_j) * 0x10))
-#define IXGBE_FCDFCD(_i)	(0x30000 + ((_i) * 0x4))
-#define IXGBE_FCFLT		0x05108 /* FC FLT Context */
-#define IXGBE_FCFLTRW		0x05110 /* FC Filter RW Control */
-#define IXGBE_FCPARAM		0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID	(1 << 0)   /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST	(1 << 1)   /* Filter First */
-#define IXGBE_FCFLT_SEQID	0x00ff0000 /* Sequence ID */
-#define IXGBE_FCFLT_SEQCNT	0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT	(1 << 13)  /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE	(1 << 14)  /* Write Enable */
-#define IXGBE_FCFLTRW_RE	(1 << 15)  /* Read Enable */
-/* FCoE Receive Control */
-#define IXGBE_FCRXCTRL		0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI	(1 << 0)   /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD	(1 << 1)   /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH	(1 << 2)   /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH	(1 << 3)   /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH	(1 << 4)   /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH	(1 << 5)   /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC	(1 << 6)   /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO	(1 << 7)   /* FC CRC Byte Ordering */
-#define IXGBE_FCRXCTRL_FCOEVER	0x00000f00 /* FCoE Version: 4 bits */
-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT	8
-/* FCoE Redirection */
-#define IXGBE_FCRECTL		0x0ED00 /* FC Redirection Control */
-#define IXGBE_FCRETA0		0x0ED10 /* FC Redirection Table 0 */
-#define IXGBE_FCRETA(_i)	(IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
-#define IXGBE_FCRECTL_ENA	0x1 /* FCoE Redir Table Enable */
-#define IXGBE_FCRETASEL_ENA	0x2 /* FCoE FCRETASEL bit */
-#define IXGBE_FCRETA_SIZE	8 /* Max entries in FCRETA */
-#define IXGBE_FCRETA_ENTRY_MASK	0x0000007f /* 7 bits for the queue index */
-#define IXGBE_FCRETA_SIZE_X550	32 /* Max entries in FCRETA */
-/* Higher 7 bits for the queue index */
-#define IXGBE_FCRETA_ENTRY_HIGH_MASK	0x007F0000
-#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT	16
-
-/* Stats registers */
-#define IXGBE_CRCERRS	0x04000
-#define IXGBE_ILLERRC	0x04004
-#define IXGBE_ERRBC	0x04008
-#define IXGBE_MSPDC	0x04010
-#define IXGBE_MPC(_i)	(0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
-#define IXGBE_MLFC	0x04034
-#define IXGBE_MRFC	0x04038
-#define IXGBE_RLEC	0x04040
-#define IXGBE_LXONTXC	0x03F60
-#define IXGBE_LXONRXC	0x0CF60
-#define IXGBE_LXOFFTXC	0x03F68
-#define IXGBE_LXOFFRXC	0x0CF68
-#define IXGBE_LXONRXCNT		0x041A4
-#define IXGBE_LXOFFRXCNT	0x041A8
-#define IXGBE_PXONRXCNT(_i)	(0x04140 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXOFFRXCNT(_i)	(0x04160 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXON2OFFCNT(_i)	(0x03240 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_PXONTXC(_i)	(0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
-#define IXGBE_PXONRXC(_i)	(0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
-#define IXGBE_PXOFFTXC(_i)	(0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
-#define IXGBE_PXOFFRXC(_i)	(0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
-#define IXGBE_PRC64		0x0405C
-#define IXGBE_PRC127		0x04060
-#define IXGBE_PRC255		0x04064
-#define IXGBE_PRC511		0x04068
-#define IXGBE_PRC1023		0x0406C
-#define IXGBE_PRC1522		0x04070
-#define IXGBE_GPRC		0x04074
-#define IXGBE_BPRC		0x04078
-#define IXGBE_MPRC		0x0407C
-#define IXGBE_GPTC		0x04080
-#define IXGBE_GORCL		0x04088
-#define IXGBE_GORCH		0x0408C
-#define IXGBE_GOTCL		0x04090
-#define IXGBE_GOTCH		0x04094
-#define IXGBE_RNBC(_i)		(0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
-#define IXGBE_RUC		0x040A4
-#define IXGBE_RFC		0x040A8
-#define IXGBE_ROC		0x040AC
-#define IXGBE_RJC		0x040B0
-#define IXGBE_MNGPRC		0x040B4
-#define IXGBE_MNGPDC		0x040B8
-#define IXGBE_MNGPTC		0x0CF90
-#define IXGBE_TORL		0x040C0
-#define IXGBE_TORH		0x040C4
-#define IXGBE_TPR		0x040D0
-#define IXGBE_TPT		0x040D4
-#define IXGBE_PTC64		0x040D8
-#define IXGBE_PTC127		0x040DC
-#define IXGBE_PTC255		0x040E0
-#define IXGBE_PTC511		0x040E4
-#define IXGBE_PTC1023		0x040E8
-#define IXGBE_PTC1522		0x040EC
-#define IXGBE_MPTC		0x040F0
-#define IXGBE_BPTC		0x040F4
-#define IXGBE_XEC		0x04120
-#define IXGBE_SSVPC		0x08780
-
-#define IXGBE_RQSMR(_i)	(0x02300 + ((_i) * 4))
-#define IXGBE_TQSMR(_i)	(((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
-			 (0x08600 + ((_i) * 4)))
-#define IXGBE_TQSM(_i)	(0x08600 + ((_i) * 4))
-
-#define IXGBE_QPRC(_i)	(0x01030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPTC(_i)	(0x06030 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC(_i)	(0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC(_i)	(0x06034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_L(_i)	(0x01034 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBRC_H(_i)	(0x01038 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QPRDC(_i)		(0x01430 + ((_i) * 0x40)) /* 16 of these */
-#define IXGBE_QBTC_L(_i)	(0x08700 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_QBTC_H(_i)	(0x08704 + ((_i) * 0x8)) /* 16 of these */
-#define IXGBE_FCCRC		0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */
-#define IXGBE_FCOERPDC		0x0241C /* FCoE Rx Packets Dropped Count */
-#define IXGBE_FCLAST		0x02424 /* FCoE Last Error Count */
-#define IXGBE_FCOEPRC		0x02428 /* Number of FCoE Packets Received */
-#define IXGBE_FCOEDWRC		0x0242C /* Number of FCoE DWords Received */
-#define IXGBE_FCOEPTC		0x08784 /* Number of FCoE Packets Transmitted */
-#define IXGBE_FCOEDWTC		0x08788 /* Number of FCoE DWords Transmitted */
-#define IXGBE_FCCRC_CNT_MASK	0x0000FFFF /* CRC_CNT: bit 0 - 15 */
-#define IXGBE_FCLAST_CNT_MASK	0x0000FFFF /* Last_CNT: bit 0 - 15 */
-#define IXGBE_O2BGPTC		0x041C4
-#define IXGBE_O2BSPC		0x087B0
-#define IXGBE_B2OSPC		0x041C0
-#define IXGBE_B2OGPRC		0x02F90
-#define IXGBE_BUPRC		0x04180
-#define IXGBE_BMPRC		0x04184
-#define IXGBE_BBPRC		0x04188
-#define IXGBE_BUPTC		0x0418C
-#define IXGBE_BMPTC		0x04190
-#define IXGBE_BBPTC		0x04194
-#define IXGBE_BCRCERRS		0x04198
-#define IXGBE_BXONRXC		0x0419C
-#define IXGBE_BXOFFRXC		0x041E0
-#define IXGBE_BXONTXC		0x041E4
-#define IXGBE_BXOFFTXC		0x041E8
-
-/* Management */
-#define IXGBE_MAVTV(_i)		(0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MFUTP(_i)		(0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MANC		0x05820
-#define IXGBE_MFVAL		0x05824
-#define IXGBE_MANC2H		0x05860
-#define IXGBE_MDEF(_i)		(0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_MIPAF		0x058B0
-#define IXGBE_MMAL(_i)		(0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_MMAH(_i)		(0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
-#define IXGBE_FTFT		0x09400 /* 0x9400-0x97FC */
-#define IXGBE_METF(_i)		(0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
-#define IXGBE_MDEF_EXT(_i)	(0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW		0x15014
-#define IXGBE_BMCIP(_i)		(0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
-#define IXGBE_BMCIPVAL		0x05060
-#define IXGBE_BMCIP_IPADDR_TYPE	0x00000001
-#define IXGBE_BMCIP_IPADDR_VALID	0x00000002
-
-/* Management Bit Fields and Masks */
-#define IXGBE_MANC_MPROXYE	0x40000000 /* Management Proxy Enable */
-#define IXGBE_MANC_RCV_TCO_EN	0x00020000 /* Rcv TCO packet enable */
-#define IXGBE_MANC_EN_BMC2OS	0x10000000 /* Ena BMC2OS and OS2BMC traffic */
-#define IXGBE_MANC_EN_BMC2OS_SHIFT	28
-
-/* Firmware Semaphore Register */
-#define IXGBE_FWSM_MODE_MASK	0xE
-#define IXGBE_FWSM_TS_ENABLED	0x1
-#define IXGBE_FWSM_FW_MODE_PT	0x4
-
-/* ARC Subsystem registers */
-#define IXGBE_HICR		0x15F00
-#define IXGBE_FWSTS		0x15F0C
-#define IXGBE_HSMC0R		0x15F04
-#define IXGBE_HSMC1R		0x15F08
-#define IXGBE_SWSR		0x15F10
-#define IXGBE_HFDR		0x15FE8
-#define IXGBE_FLEX_MNG		0x15800 /* 0x15800 - 0x15EFC */
-
-#define IXGBE_HICR_EN		0x01  /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define IXGBE_HICR_C		0x02
-#define IXGBE_HICR_SV		0x04  /* Status Validity */
-#define IXGBE_HICR_FW_RESET_ENABLE	0x40
-#define IXGBE_HICR_FW_RESET	0x80
-
-/* PCI-E registers */
-#define IXGBE_GCR		0x11000
-#define IXGBE_GTV		0x11004
-#define IXGBE_FUNCTAG		0x11008
-#define IXGBE_GLT		0x1100C
-#define IXGBE_PCIEPIPEADR	0x11004
-#define IXGBE_PCIEPIPEDAT	0x11008
-#define IXGBE_GSCL_1		0x11010
-#define IXGBE_GSCL_2		0x11014
-#define IXGBE_GSCL_3		0x11018
-#define IXGBE_GSCL_4		0x1101C
-#define IXGBE_GSCN_0		0x11020
-#define IXGBE_GSCN_1		0x11024
-#define IXGBE_GSCN_2		0x11028
-#define IXGBE_GSCN_3		0x1102C
-#define IXGBE_FACTPS		0x10150
-#define IXGBE_PCIEANACTL	0x11040
-#define IXGBE_SWSM		0x10140
-#define IXGBE_FWSM		0x10148
-#define IXGBE_GSSR		0x10160
-#define IXGBE_MREVID		0x11064
-#define IXGBE_DCA_ID		0x11070
-#define IXGBE_DCA_CTRL		0x11074
-#define IXGBE_SWFW_SYNC		IXGBE_GSSR
-
-/* PCI-E registers 82599-Specific */
-#define IXGBE_GCR_EXT		0x11050
-#define IXGBE_GSCL_5_82599	0x11030
-#define IXGBE_GSCL_6_82599	0x11034
-#define IXGBE_GSCL_7_82599	0x11038
-#define IXGBE_GSCL_8_82599	0x1103C
-#define IXGBE_PHYADR_82599	0x11040
-#define IXGBE_PHYDAT_82599	0x11044
-#define IXGBE_PHYCTL_82599	0x11048
-#define IXGBE_PBACLR_82599	0x11068
-#define IXGBE_CIAA_82599	0x11088
-#define IXGBE_CIAD_82599	0x1108C
-#define IXGBE_CIAA_X550		0x11508
-#define IXGBE_CIAD_X550		0x11510
-#define IXGBE_CIAA_BY_MAC(_hw)	((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-				 IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw)	((((_hw)->mac.type >= ixgbe_mac_X550) ? \
-				 IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
-#define IXGBE_PICAUSE		0x110B0
-#define IXGBE_PIENA		0x110B8
-#define IXGBE_CDQ_MBR_82599	0x110B4
-#define IXGBE_PCIESPARE		0x110BC
-#define IXGBE_MISC_REG_82599	0x110F0
-#define IXGBE_ECC_CTRL_0_82599	0x11100
-#define IXGBE_ECC_CTRL_1_82599	0x11104
-#define IXGBE_ECC_STATUS_82599	0x110E0
-#define IXGBE_BAR_CTRL_82599	0x110F4
-
-/* PCI Express Control */
-#define IXGBE_GCR_CMPL_TMOUT_MASK	0x0000F000
-#define IXGBE_GCR_CMPL_TMOUT_10ms	0x00001000
-#define IXGBE_GCR_CMPL_TMOUT_RESEND	0x00010000
-#define IXGBE_GCR_CAP_VER2		0x00040000
-
-#define IXGBE_GCR_EXT_MSIX_EN		0x80000000
-#define IXGBE_GCR_EXT_BUFFERS_CLEAR	0x40000000
-#define IXGBE_GCR_EXT_VT_MODE_16	0x00000001
-#define IXGBE_GCR_EXT_VT_MODE_32	0x00000002
-#define IXGBE_GCR_EXT_VT_MODE_64	0x00000003
-#define IXGBE_GCR_EXT_SRIOV		(IXGBE_GCR_EXT_MSIX_EN | \
-					 IXGBE_GCR_EXT_VT_MODE_64)
-#define IXGBE_GCR_EXT_VT_MODE_MASK	0x00000003
-/* Time Sync Registers */
-#define IXGBE_TSYNCRXCTL	0x05188 /* Rx Time Sync Control register - RW */
-#define IXGBE_TSYNCTXCTL	0x08C00 /* Tx Time Sync Control register - RW */
-#define IXGBE_RXSTMPL	0x051E8 /* Rx timestamp Low - RO */
-#define IXGBE_RXSTMPH	0x051A4 /* Rx timestamp High - RO */
-#define IXGBE_RXSATRL	0x051A0 /* Rx timestamp attribute low - RO */
-#define IXGBE_RXSATRH	0x051A8 /* Rx timestamp attribute high - RO */
-#define IXGBE_RXMTRL	0x05120 /* RX message type register low - RW */
-#define IXGBE_TXSTMPL	0x08C04 /* Tx timestamp value Low - RO */
-#define IXGBE_TXSTMPH	0x08C08 /* Tx timestamp value High - RO */
-#define IXGBE_SYSTIML	0x08C0C /* System time register Low - RO */
-#define IXGBE_SYSTIMH	0x08C10 /* System time register High - RO */
-#define IXGBE_SYSTIMR	0x08C58 /* System time register Residue - RO */
-#define IXGBE_TIMINCA	0x08C14 /* Increment attributes register - RW */
-#define IXGBE_TIMADJL	0x08C18 /* Time Adjustment Offset register Low - RW */
-#define IXGBE_TIMADJH	0x08C1C /* Time Adjustment Offset register High - RW */
-#define IXGBE_TSAUXC	0x08C20 /* TimeSync Auxiliary Control register - RW */
-#define IXGBE_TRGTTIML0	0x08C24 /* Target Time Register 0 Low - RW */
-#define IXGBE_TRGTTIMH0	0x08C28 /* Target Time Register 0 High - RW */
-#define IXGBE_TRGTTIML1	0x08C2C /* Target Time Register 1 Low - RW */
-#define IXGBE_TRGTTIMH1	0x08C30 /* Target Time Register 1 High - RW */
-#define IXGBE_CLKTIML	0x08C34 /* Clock Out Time Register Low - RW */
-#define IXGBE_CLKTIMH	0x08C38 /* Clock Out Time Register High - RW */
-#define IXGBE_FREQOUT0	0x08C34 /* Frequency Out 0 Control register - RW */
-#define IXGBE_FREQOUT1	0x08C38 /* Frequency Out 1 Control register - RW */
-#define IXGBE_AUXSTMPL0	0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
-#define IXGBE_AUXSTMPH0	0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
-#define IXGBE_AUXSTMPL1	0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
-#define IXGBE_AUXSTMPH1	0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
-#define IXGBE_TSIM	0x08C68 /* TimeSync Interrupt Mask Register - RW */
-#define IXGBE_TSICR	0x08C60 /* TimeSync Interrupt Cause Register - WO */
-#define IXGBE_TSSDP	0x0003C /* TimeSync SDP Configuration Register - RW */
-
-/* Diagnostic Registers */
-#define IXGBE_RDSTATCTL		0x02C20
-#define IXGBE_RDSTAT(_i)	(0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
-#define IXGBE_RDHMPN		0x02F08
-#define IXGBE_RIC_DW(_i)	(0x02F10 + ((_i) * 4))
-#define IXGBE_RDPROBE		0x02F20
-#define IXGBE_RDMAM		0x02F30
-#define IXGBE_RDMAD		0x02F34
-#define IXGBE_TDHMPN		0x07F08
-#define IXGBE_TDHMPN2		0x082FC
-#define IXGBE_TXDESCIC		0x082CC
-#define IXGBE_TIC_DW(_i)	(0x07F10 + ((_i) * 4))
-#define IXGBE_TIC_DW2(_i)	(0x082B0 + ((_i) * 4))
-#define IXGBE_TDPROBE		0x07F20
-#define IXGBE_TXBUFCTRL		0x0C600
-#define IXGBE_TXBUFDATA0	0x0C610
-#define IXGBE_TXBUFDATA1	0x0C614
-#define IXGBE_TXBUFDATA2	0x0C618
-#define IXGBE_TXBUFDATA3	0x0C61C
-#define IXGBE_RXBUFCTRL		0x03600
-#define IXGBE_RXBUFDATA0	0x03610
-#define IXGBE_RXBUFDATA1	0x03614
-#define IXGBE_RXBUFDATA2	0x03618
-#define IXGBE_RXBUFDATA3	0x0361C
-#define IXGBE_PCIE_DIAG(_i)	(0x11090 + ((_i) * 4)) /* 8 of these */
-#define IXGBE_RFVAL		0x050A4
-#define IXGBE_MDFTC1		0x042B8
-#define IXGBE_MDFTC2		0x042C0
-#define IXGBE_MDFTFIFO1		0x042C4
-#define IXGBE_MDFTFIFO2		0x042C8
-#define IXGBE_MDFTS		0x042CC
-#define IXGBE_RXDATAWRPTR(_i)	(0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
-#define IXGBE_RXDESCWRPTR(_i)	(0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
-#define IXGBE_RXDATARDPTR(_i)	(0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
-#define IXGBE_RXDESCRDPTR(_i)	(0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
-#define IXGBE_TXDATAWRPTR(_i)	(0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
-#define IXGBE_TXDESCWRPTR(_i)	(0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
-#define IXGBE_TXDATARDPTR(_i)	(0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
-#define IXGBE_TXDESCRDPTR(_i)	(0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
-#define IXGBE_PCIEECCCTL	0x1106C
-#define IXGBE_RXWRPTR(_i)	(0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
-#define IXGBE_RXUSED(_i)	(0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
-#define IXGBE_RXRDPTR(_i)	(0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
-#define IXGBE_RXRDWRPTR(_i)	(0x03160 + ((_i) * 4)) /* 8 of these 3160-317C*/
-#define IXGBE_TXWRPTR(_i)	(0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
-#define IXGBE_TXUSED(_i)	(0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
-#define IXGBE_TXRDPTR(_i)	(0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
-#define IXGBE_TXRDWRPTR(_i)	(0x0C160 + ((_i) * 4)) /* 8 of these C160-C17C*/
-#define IXGBE_PCIEECCCTL0	0x11100
-#define IXGBE_PCIEECCCTL1	0x11104
-#define IXGBE_RXDBUECC		0x03F70
-#define IXGBE_TXDBUECC		0x0CF70
-#define IXGBE_RXDBUEST		0x03F74
-#define IXGBE_TXDBUEST		0x0CF74
-#define IXGBE_PBTXECC		0x0C300
-#define IXGBE_PBRXECC		0x03300
-#define IXGBE_GHECCR		0x110B0
-
-/* MAC Registers */
-#define IXGBE_PCS1GCFIG		0x04200
-#define IXGBE_PCS1GLCTL		0x04208
-#define IXGBE_PCS1GLSTA		0x0420C
-#define IXGBE_PCS1GDBG0		0x04210
-#define IXGBE_PCS1GDBG1		0x04214
-#define IXGBE_PCS1GANA		0x04218
-#define IXGBE_PCS1GANLP		0x0421C
-#define IXGBE_PCS1GANNP		0x04220
-#define IXGBE_PCS1GANLPNP	0x04224
-#define IXGBE_HLREG0		0x04240
-#define IXGBE_HLREG1		0x04244
-#define IXGBE_PAP		0x04248
-#define IXGBE_MACA		0x0424C
-#define IXGBE_APAE		0x04250
-#define IXGBE_ARD		0x04254
-#define IXGBE_AIS		0x04258
-#define IXGBE_MSCA		0x0425C
-#define IXGBE_MSRWD		0x04260
-#define IXGBE_MLADD		0x04264
-#define IXGBE_MHADD		0x04268
-#define IXGBE_MAXFRS		0x04268
-#define IXGBE_TREG		0x0426C
-#define IXGBE_PCSS1		0x04288
-#define IXGBE_PCSS2		0x0428C
-#define IXGBE_XPCSS		0x04290
-#define IXGBE_MFLCN		0x04294
-#define IXGBE_SERDESC		0x04298
-#define IXGBE_MACS		0x0429C
-#define IXGBE_AUTOC		0x042A0
-#define IXGBE_LINKS		0x042A4
-#define IXGBE_LINKS2		0x04324
-#define IXGBE_AUTOC2		0x042A8
-#define IXGBE_AUTOC3		0x042AC
-#define IXGBE_ANLP1		0x042B0
-#define IXGBE_ANLP2		0x042B4
-#define IXGBE_MACC		0x04330
-#define IXGBE_ATLASCTL		0x04800
-#define IXGBE_MMNGC		0x042D0
-#define IXGBE_ANLPNP1		0x042D4
-#define IXGBE_ANLPNP2		0x042D8
-#define IXGBE_KRPCSFC		0x042E0
-#define IXGBE_KRPCSS		0x042E4
-#define IXGBE_FECS1		0x042E8
-#define IXGBE_FECS2		0x042EC
-#define IXGBE_SMADARCTL		0x14F10
-#define IXGBE_MPVC		0x04318
-#define IXGBE_SGMIIC		0x04314
-
-/* Statistics Registers */
-#define IXGBE_RXNFGPC		0x041B0
-#define IXGBE_RXNFGBCL		0x041B4
-#define IXGBE_RXNFGBCH		0x041B8
-#define IXGBE_RXDGPC		0x02F50
-#define IXGBE_RXDGBCL		0x02F54
-#define IXGBE_RXDGBCH		0x02F58
-#define IXGBE_RXDDGPC		0x02F5C
-#define IXGBE_RXDDGBCL		0x02F60
-#define IXGBE_RXDDGBCH		0x02F64
-#define IXGBE_RXLPBKGPC		0x02F68
-#define IXGBE_RXLPBKGBCL	0x02F6C
-#define IXGBE_RXLPBKGBCH	0x02F70
-#define IXGBE_RXDLPBKGPC	0x02F74
-#define IXGBE_RXDLPBKGBCL	0x02F78
-#define IXGBE_RXDLPBKGBCH	0x02F7C
-#define IXGBE_TXDGPC		0x087A0
-#define IXGBE_TXDGBCL		0x087A4
-#define IXGBE_TXDGBCH		0x087A8
-
-#define IXGBE_RXDSTATCTRL	0x02F40
-
-/* Copper Pond 2 link timeout */
-#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
-
-/* Omer CORECTL */
-#define IXGBE_CORECTL			0x014F00
-/* BARCTRL */
-#define IXGBE_BARCTRL			0x110F4
-#define IXGBE_BARCTRL_FLSIZE		0x0700
-#define IXGBE_BARCTRL_FLSIZE_SHIFT	8
-#define IXGBE_BARCTRL_CSRSIZE		0x2000
-
-/* RSCCTL Bit Masks */
-#define IXGBE_RSCCTL_RSCEN	0x01
-#define IXGBE_RSCCTL_MAXDESC_1	0x00
-#define IXGBE_RSCCTL_MAXDESC_4	0x04
-#define IXGBE_RSCCTL_MAXDESC_8	0x08
-#define IXGBE_RSCCTL_MAXDESC_16	0x0C
-#define IXGBE_RSCCTL_TS_DIS	0x02
-
-/* RSCDBU Bit Masks */
-#define IXGBE_RSCDBU_RSCSMALDIS_MASK	0x0000007F
-#define IXGBE_RSCDBU_RSCACKDIS		0x00000080
-
-/* RDRXCTL Bit Masks */
-#define IXGBE_RDRXCTL_RDMTS_1_2		0x00000000 /* Rx Desc Min THLD Size */
-#define IXGBE_RDRXCTL_CRCSTRIP		0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_PSP		0x00000004 /* Pad Small Packet */
-#define IXGBE_RDRXCTL_MVMEN		0x00000020
-#define IXGBE_RDRXCTL_RSC_PUSH_DIS	0x00000020
-#define IXGBE_RDRXCTL_DMAIDONE		0x00000008 /* DMA init cycle done */
-#define IXGBE_RDRXCTL_RSC_PUSH		0x00000080
-#define IXGBE_RDRXCTL_AGGDIS		0x00010000 /* Aggregation disable */
-#define IXGBE_RDRXCTL_RSCFRSTSIZE	0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS		0x00800000 /* Disable RSC compl on LLI*/
-#define IXGBE_RDRXCTL_RSCACKC		0x02000000 /* must set 1 when RSC ena */
-#define IXGBE_RDRXCTL_FCOE_WRFIX	0x04000000 /* must set 1 when RSC ena */
-#define IXGBE_RDRXCTL_MBINTEN		0x10000000
-#define IXGBE_RDRXCTL_MDP_EN		0x20000000
-
-/* RQTC Bit Masks and Shifts */
-#define IXGBE_RQTC_SHIFT_TC(_i)	((_i) * 4)
-#define IXGBE_RQTC_TC0_MASK	(0x7 << 0)
-#define IXGBE_RQTC_TC1_MASK	(0x7 << 4)
-#define IXGBE_RQTC_TC2_MASK	(0x7 << 8)
-#define IXGBE_RQTC_TC3_MASK	(0x7 << 12)
-#define IXGBE_RQTC_TC4_MASK	(0x7 << 16)
-#define IXGBE_RQTC_TC5_MASK	(0x7 << 20)
-#define IXGBE_RQTC_TC6_MASK	(0x7 << 24)
-#define IXGBE_RQTC_TC7_MASK	(0x7 << 28)
-
-/* PSRTYPE.RQPL Bit masks and shift */
-#define IXGBE_PSRTYPE_RQPL_MASK		0x7
-#define IXGBE_PSRTYPE_RQPL_SHIFT	29
-
-/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS	0x00000004 /* Global IO Master Disable bit */
-#define IXGBE_CTRL_LNK_RST	0x00000008 /* Link Reset. Resets everything. */
-#define IXGBE_CTRL_RST		0x04000000 /* Reset (SW) */
-#define IXGBE_CTRL_RST_MASK	(IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
-
-/* FACTPS */
-#define IXGBE_FACTPS_MNGCG	0x20000000 /* Manageability Clock Gated */
-#define IXGBE_FACTPS_LFS	0x40000000 /* LAN Function Select */
-
-/* MHADD Bit Masks */
-#define IXGBE_MHADD_MFS_MASK	0xFFFF0000
-#define IXGBE_MHADD_MFS_SHIFT	16
-
-/* Extended Device Control */
-#define IXGBE_CTRL_EXT_PFRSTD	0x00004000 /* Physical Function Reset Done */
-#define IXGBE_CTRL_EXT_NS_DIS	0x00010000 /* No Snoop disable */
-#define IXGBE_CTRL_EXT_RO_DIS	0x00020000 /* Relaxed Ordering disable */
-#define IXGBE_CTRL_EXT_DRV_LOAD	0x10000000 /* Driver loaded bit for FW */
-
-/* Direct Cache Access (DCA) definitions */
-#define IXGBE_DCA_CTRL_DCA_ENABLE	0x00000000 /* DCA Enable */
-#define IXGBE_DCA_CTRL_DCA_DISABLE	0x00000001 /* DCA Disable */
-
-#define IXGBE_DCA_CTRL_DCA_MODE_CB1	0x00 /* DCA Mode CB1 */
-#define IXGBE_DCA_CTRL_DCA_MODE_CB2	0x02 /* DCA Mode CB2 */
-
-#define IXGBE_DCA_RXCTRL_CPUID_MASK	0x0000001F /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599	0xFF000000 /* Rx CPUID Mask */
-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599	24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_CPUID_MASK	0x0000001F /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599	0xFF000000 /* Tx CPUID Mask */
-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599	24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
-#define IXGBE_DCA_MAX_QUEUES_82598	16 /* DCA regs only on 16 queues */
-
-/* MSCA Bit Masks */
-#define IXGBE_MSCA_NP_ADDR_MASK		0x0000FFFF /* MDI Addr (new prot) */
-#define IXGBE_MSCA_NP_ADDR_SHIFT	0
-#define IXGBE_MSCA_DEV_TYPE_MASK	0x001F0000 /* Dev Type (new prot) */
-#define IXGBE_MSCA_DEV_TYPE_SHIFT	16 /* Dev Type shift (Reg Addr in old prot) */
-#define IXGBE_MSCA_PHY_ADDR_MASK	0x03E00000 /* PHY Address mask */
-#define IXGBE_MSCA_PHY_ADDR_SHIFT	21 /* PHY Address shift*/
-#define IXGBE_MSCA_OP_CODE_MASK		0x0C000000 /* OP CODE mask */
-#define IXGBE_MSCA_OP_CODE_SHIFT	26 /* OP CODE shift */
-#define IXGBE_MSCA_ADDR_CYCLE		0x00000000 /* OP CODE 00 (addr cycle) */
-#define IXGBE_MSCA_WRITE		0x04000000 /* OP CODE 01 (wr) */
-#define IXGBE_MSCA_READ			0x0C000000 /* OP CODE 11 (rd) */
-#define IXGBE_MSCA_READ_AUTOINC		0x08000000 /* OP CODE 10 (rd auto inc)*/
-#define IXGBE_MSCA_ST_CODE_MASK		0x30000000 /* ST Code mask */
-#define IXGBE_MSCA_ST_CODE_SHIFT	28 /* ST Code shift */
-#define IXGBE_MSCA_NEW_PROTOCOL		0x00000000 /* ST CODE 00 (new prot) */
-#define IXGBE_MSCA_OLD_PROTOCOL		0x10000000 /* ST CODE 01 (old prot) */
-#define IXGBE_MSCA_MDI_COMMAND		0x40000000 /* Initiate MDI command */
-#define IXGBE_MSCA_MDI_IN_PROG_EN	0x80000000 /* MDI in progress ena */
-
-/* MSRWD bit masks */
-#define IXGBE_MSRWD_WRITE_DATA_MASK	0x0000FFFF
-#define IXGBE_MSRWD_WRITE_DATA_SHIFT	0
-#define IXGBE_MSRWD_READ_DATA_MASK	0xFFFF0000
-#define IXGBE_MSRWD_READ_DATA_SHIFT	16
-
-/* Atlas registers */
-#define IXGBE_ATLAS_PDN_LPBK		0x24
-#define IXGBE_ATLAS_PDN_10G		0xB
-#define IXGBE_ATLAS_PDN_1G		0xC
-#define IXGBE_ATLAS_PDN_AN		0xD
-
-/* Atlas bit masks */
-#define IXGBE_ATLASCTL_WRITE_CMD	0x00010000
-#define IXGBE_ATLAS_PDN_TX_REG_EN	0x10
-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL	0xF0
-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL	0xF0
-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL	0xF0
-
-/* Omer bit masks */
-#define IXGBE_CORECTL_WRITE_CMD		0x00010000
-
-/* Device Type definitions for new protocol MDIO commands */
-#define IXGBE_MDIO_PMA_PMD_DEV_TYPE		0x1
-#define IXGBE_MDIO_PCS_DEV_TYPE			0x3
-#define IXGBE_MDIO_PHY_XS_DEV_TYPE		0x4
-#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE		0x7
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE	0x1E   /* Device 30 */
-#define IXGBE_TWINAX_DEV			1
-
-#define IXGBE_MDIO_COMMAND_TIMEOUT	100 /* PHY Timeout for 1 GB mode */
-
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL		0x0 /* VS1 Ctrl Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS		0x1 /* VS1 Status Reg */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS	0x0008 /* 1 = Link Up */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS	0x0010 /* 0-10G, 1-1G */
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED		0x0018
-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED		0x0010
-
-#define IXGBE_MDIO_AUTO_NEG_CONTROL	0x0 /* AUTO_NEG Control Reg */
-#define IXGBE_MDIO_AUTO_NEG_STATUS	0x1 /* AUTO_NEG Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT	0xC800 /* AUTO_NEG Vendor Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_ADVT	0x10 /* AUTO_NEG Advt Reg */
-#define IXGBE_MDIO_AUTO_NEG_LP		0x13 /* AUTO_NEG LP Status Reg */
-#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT	0x3C /* AUTO_NEG EEE Advt Reg */
-#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT	0x8  /* AUTO NEG EEE 10GBaseT Advt */
-#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4  /* AUTO NEG EEE 1000BaseT Advt */
-#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT	0x2  /* AUTO NEG EEE 100BaseT Advt */
-#define IXGBE_MDIO_PHY_XS_CONTROL	0x0 /* PHY_XS Control Reg */
-#define IXGBE_MDIO_PHY_XS_RESET		0x8000 /* PHY_XS Reset */
-#define IXGBE_MDIO_PHY_ID_HIGH		0x2 /* PHY ID High Reg*/
-#define IXGBE_MDIO_PHY_ID_LOW		0x3 /* PHY ID Low Reg*/
-#define IXGBE_MDIO_PHY_SPEED_ABILITY	0x4 /* Speed Ability Reg */
-#define IXGBE_MDIO_PHY_SPEED_10G	0x0001 /* 10G capable */
-#define IXGBE_MDIO_PHY_SPEED_1G		0x0010 /* 1G capable */
-#define IXGBE_MDIO_PHY_SPEED_100M	0x0020 /* 100M capable */
-#define IXGBE_MDIO_PHY_EXT_ABILITY	0xB /* Ext Ability Reg */
-#define IXGBE_MDIO_PHY_10GBASET_ABILITY		0x0004 /* 10GBaseT capable */
-#define IXGBE_MDIO_PHY_1000BASET_ABILITY	0x0020 /* 1000BaseT capable */
-#define IXGBE_MDIO_PHY_100BASETX_ABILITY	0x0080 /* 100BaseTX capable */
-#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE	0x0800 /* Set low power mode */
-
-#define IXGBE_MDIO_TX_VENDOR_ALARMS_3		0xCC02 /* Vendor Alarms 3 Reg */
-#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK	0x3 /* PHY Reset Complete Mask */
-#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
-#define IXGBE_MDIO_POWER_UP_STALL		0x8000 /* Power Up Stall */
-
-#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR	0x0000 /* PMA/PMD Control Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR	0xC30A /* PHY_XS SDA/SCL Addr Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA	0xC30B /* PHY_XS SDA/SCL Data Reg */
-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT	0xC30C /* PHY_XS SDA/SCL Status Reg */
-#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
-#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
-
-#define IXGBE_PCRC8ECL		0x0E810 /* PCR CRC-8 Error Count Lo */
-#define IXGBE_PCRC8ECH		0x0E811 /* PCR CRC-8 Error Count Hi */
-#define IXGBE_PCRC8ECH_MASK	0x1F
-#define IXGBE_LDPCECL		0x0E820 /* PCR Uncorrected Error Count Lo */
-#define IXGBE_LDPCECH		0x0E821 /* PCR Uncorrected Error Count Hi */
-
-/* MII clause 22/28 definitions */
-#define IXGBE_MDIO_PHY_LOW_POWER_MODE	0x0800
-
-#define IXGBE_MDIO_XENPAK_LASI_STATUS		0x9005 /* XENPAK LASI Status register*/
-#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM	0x1 /* Link Status Alarm change */
-
-#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS		0x4 /* Indicates if link is up */
-
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK		0x7 /* Speed/Duplex Mask */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF	0x0 /* 10Mb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL	0x1 /* 10Mb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF	0x2 /* 100Mb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL	0x3 /* 100Mb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF	0x4 /* 1Gb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL	0x5 /* 1Gb/s Full Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF	0x6 /* 10Gb/s Half Duplex */
-#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL	0x7 /* 10Gb/s Full Duplex */
-
-#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG	0x20   /* 10G Control Reg */
-#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
-#define IXGBE_MII_AUTONEG_XNP_TX_REG		0x17   /* 1G XNP Transmit */
-#define IXGBE_MII_AUTONEG_ADVERTISE_REG		0x10   /* 100M Advertisement */
-#define IXGBE_MII_10GBASE_T_ADVERTISE		0x1000 /* full duplex, bit:12*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX	0x4000 /* full duplex, bit:14*/
-#define IXGBE_MII_1GBASE_T_ADVERTISE		0x8000 /* full duplex, bit:15*/
-#define IXGBE_MII_2_5GBASE_T_ADVERTISE		0x0400
-#define IXGBE_MII_5GBASE_T_ADVERTISE		0x0800
-#define IXGBE_MII_100BASE_T_ADVERTISE		0x0100 /* full duplex, bit:8 */
-#define IXGBE_MII_100BASE_T_ADVERTISE_HALF	0x0080 /* half duplex, bit:7 */
-#define IXGBE_MII_RESTART			0x200
-#define IXGBE_MII_AUTONEG_COMPLETE		0x20
-#define IXGBE_MII_AUTONEG_LINK_UP		0x04
-#define IXGBE_MII_AUTONEG_REG			0x0
-
-#define IXGBE_PHY_REVISION_MASK		0xFFFFFFF0
-#define IXGBE_MAX_PHY_ADDR		32
-
-/* PHY IDs*/
-#define TN1010_PHY_ID	0x00A19410
-#define TNX_FW_REV	0xB
-#define X540_PHY_ID	0x01540200
-#define X550_PHY_ID	0x01540220
-#define X557_PHY_ID	0x01540240
-#define AQ_FW_REV	0x20
-#define QT2022_PHY_ID	0x0043A400
-#define ATH_PHY_ID	0x03429050
-
-/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID	0x01410CD0
-
-/* Special PHY Init Routine */
-#define IXGBE_PHY_INIT_OFFSET_NL	0x002B
-#define IXGBE_PHY_INIT_END_NL		0xFFFF
-#define IXGBE_CONTROL_MASK_NL		0xF000
-#define IXGBE_DATA_MASK_NL		0x0FFF
-#define IXGBE_CONTROL_SHIFT_NL		12
-#define IXGBE_DELAY_NL			0
-#define IXGBE_DATA_NL			1
-#define IXGBE_CONTROL_NL		0x000F
-#define IXGBE_CONTROL_EOL_NL		0x0FFF
-#define IXGBE_CONTROL_SOL_NL		0x0000
-
-/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN	0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN	0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN	0x00000004 /* SDP2 */
-#define IXGBE_SDP0_GPIEN_X540	0x00000002 /* SDP0 on X540 and X550 */
-#define IXGBE_SDP1_GPIEN_X540	0x00000004 /* SDP1 on X540 and X550 */
-#define IXGBE_SDP2_GPIEN_X540	0x00000008 /* SDP2 on X540 and X550 */
-#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
-				      IXGBE_SDP0_GPIEN_X540 : IXGBE_SDP0_GPIEN)
-#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
-				      IXGBE_SDP1_GPIEN_X540 : IXGBE_SDP1_GPIEN)
-#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) ((_hw)->mac.type >= ixgbe_mac_X540 ? \
-				      IXGBE_SDP2_GPIEN_X540 : IXGBE_SDP2_GPIEN)
-#define IXGBE_GPIE_MSIX_MODE	0x00000010 /* MSI-X mode */
-#define IXGBE_GPIE_OCD		0x00000020 /* Other Clear Disable */
-#define IXGBE_GPIE_EIMEN	0x00000040 /* Immediate Interrupt Enable */
-#define IXGBE_GPIE_EIAME	0x40000000
-#define IXGBE_GPIE_PBA_SUPPORT	0x80000000
-#define IXGBE_GPIE_RSC_DELAY_SHIFT	11
-#define IXGBE_GPIE_VTMODE_MASK	0x0000C000 /* VT Mode Mask */
-#define IXGBE_GPIE_VTMODE_16	0x00004000 /* 16 VFs 8 queues per VF */
-#define IXGBE_GPIE_VTMODE_32	0x00008000 /* 32 VFs 4 queues per VF */
-#define IXGBE_GPIE_VTMODE_64	0x0000C000 /* 64 VFs 2 queues per VF */
-
-/* Packet Buffer Initialization */
-#define IXGBE_MAX_PACKET_BUFFERS	8
-
-#define IXGBE_TXPBSIZE_20KB	0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB	0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB	0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB	0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB	0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB	0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_RXPBSIZE_MAX	0x00080000 /* 512KB Packet Buffer */
-#define IXGBE_TXPBSIZE_MAX	0x00028000 /* 160KB Packet Buffer */
-
-#define IXGBE_TXPKT_SIZE_MAX	0xA /* Max Tx Packet size */
-#define IXGBE_MAX_PB		8
-
-/* Packet buffer allocation strategies */
-enum {
-	PBA_STRATEGY_EQUAL	= 0, /* Distribute PB space equally */
-#define PBA_STRATEGY_EQUAL	PBA_STRATEGY_EQUAL
-	PBA_STRATEGY_WEIGHTED	= 1, /* Weight front half of TCs */
-#define PBA_STRATEGY_WEIGHTED	PBA_STRATEGY_WEIGHTED
-};
-
-/* Transmit Flow Control status */
-#define IXGBE_TFCS_TXOFF	0x00000001
-#define IXGBE_TFCS_TXOFF0	0x00000100
-#define IXGBE_TFCS_TXOFF1	0x00000200
-#define IXGBE_TFCS_TXOFF2	0x00000400
-#define IXGBE_TFCS_TXOFF3	0x00000800
-#define IXGBE_TFCS_TXOFF4	0x00001000
-#define IXGBE_TFCS_TXOFF5	0x00002000
-#define IXGBE_TFCS_TXOFF6	0x00004000
-#define IXGBE_TFCS_TXOFF7	0x00008000
-
-/* TCP Timer */
-#define IXGBE_TCPTIMER_KS		0x00000100
-#define IXGBE_TCPTIMER_COUNT_ENABLE	0x00000200
-#define IXGBE_TCPTIMER_COUNT_FINISH	0x00000400
-#define IXGBE_TCPTIMER_LOOP		0x00000800
-#define IXGBE_TCPTIMER_DURATION_MASK	0x000000FF
-
-/* HLREG0 Bit Masks */
-#define IXGBE_HLREG0_TXCRCEN		0x00000001 /* bit  0 */
-#define IXGBE_HLREG0_RXCRCSTRP		0x00000002 /* bit  1 */
-#define IXGBE_HLREG0_JUMBOEN		0x00000004 /* bit  2 */
-#define IXGBE_HLREG0_TXPADEN		0x00000400 /* bit 10 */
-#define IXGBE_HLREG0_TXPAUSEEN		0x00001000 /* bit 12 */
-#define IXGBE_HLREG0_RXPAUSEEN		0x00004000 /* bit 14 */
-#define IXGBE_HLREG0_LPBK		0x00008000 /* bit 15 */
-#define IXGBE_HLREG0_MDCSPD		0x00010000 /* bit 16 */
-#define IXGBE_HLREG0_CONTMDC		0x00020000 /* bit 17 */
-#define IXGBE_HLREG0_CTRLFLTR		0x00040000 /* bit 18 */
-#define IXGBE_HLREG0_PREPEND		0x00F00000 /* bits 20-23 */
-#define IXGBE_HLREG0_PRIPAUSEEN		0x01000000 /* bit 24 */
-#define IXGBE_HLREG0_RXPAUSERECDA	0x06000000 /* bits 25-26 */
-#define IXGBE_HLREG0_RXLNGTHERREN	0x08000000 /* bit 27 */
-#define IXGBE_HLREG0_RXPADSTRIPEN	0x10000000 /* bit 28 */
-
-/* VMD_CTL bitmasks */
-#define IXGBE_VMD_CTL_VMDQ_EN		0x00000001
-#define IXGBE_VMD_CTL_VMDQ_FILTER	0x00000002
-
-/* VT_CTL bitmasks */
-#define IXGBE_VT_CTL_DIS_DEFPL		0x20000000 /* disable default pool */
-#define IXGBE_VT_CTL_REPLEN		0x40000000 /* replication enabled */
-#define IXGBE_VT_CTL_VT_ENABLE		0x00000001  /* Enable VT Mode */
-#define IXGBE_VT_CTL_POOL_SHIFT		7
-#define IXGBE_VT_CTL_POOL_MASK		(0x3F << IXGBE_VT_CTL_POOL_SHIFT)
-
-/* VMOLR bitmasks */
-#define IXGBE_VMOLR_AUPE	0x01000000 /* accept untagged packets */
-#define IXGBE_VMOLR_ROMPE	0x02000000 /* accept packets in MTA tbl */
-#define IXGBE_VMOLR_ROPE	0x04000000 /* accept packets in UC tbl */
-#define IXGBE_VMOLR_BAM		0x08000000 /* accept broadcast packets */
-#define IXGBE_VMOLR_MPE		0x10000000 /* multicast promiscuous */
-
-/* VFRE bitmask */
-#define IXGBE_VFRE_ENABLE_ALL	0xFFFFFFFF
-
-#define IXGBE_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
-
-/* RDHMPN and TDHMPN bitmasks */
-#define IXGBE_RDHMPN_RDICADDR		0x007FF800
-#define IXGBE_RDHMPN_RDICRDREQ		0x00800000
-#define IXGBE_RDHMPN_RDICADDR_SHIFT	11
-#define IXGBE_TDHMPN_TDICADDR		0x003FF800
-#define IXGBE_TDHMPN_TDICRDREQ		0x00800000
-#define IXGBE_TDHMPN_TDICADDR_SHIFT	11
-
-#define IXGBE_RDMAM_MEM_SEL_SHIFT		13
-#define IXGBE_RDMAM_DWORD_SHIFT			9
-#define IXGBE_RDMAM_DESC_COMP_FIFO		1
-#define IXGBE_RDMAM_DFC_CMD_FIFO		2
-#define IXGBE_RDMAM_RSC_HEADER_ADDR		3
-#define IXGBE_RDMAM_TCN_STATUS_RAM		4
-#define IXGBE_RDMAM_WB_COLL_FIFO		5
-#define IXGBE_RDMAM_QSC_CNT_RAM			6
-#define IXGBE_RDMAM_QSC_FCOE_RAM		7
-#define IXGBE_RDMAM_QSC_QUEUE_CNT		8
-#define IXGBE_RDMAM_QSC_QUEUE_RAM		0xA
-#define IXGBE_RDMAM_QSC_RSC_RAM			0xB
-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE		135
-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT		4
-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE		48
-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT		7
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE	32
-#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT	4
-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE	256
-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT	9
-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE		8
-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT		4
-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE		64
-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT		4
-#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE		512
-#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT		5
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE		32
-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT		4
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE		128
-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT		8
-#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE		32
-#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT		8
-
-#define IXGBE_TXDESCIC_READY	0x80000000
-
-/* Receive Checksum Control */
-#define IXGBE_RXCSUM_IPPCSE	0x00001000 /* IP payload checksum enable */
-#define IXGBE_RXCSUM_PCSD	0x00002000 /* packet checksum disabled */
-
-/* FCRTL Bit Masks */
-#define IXGBE_FCRTL_XONE	0x80000000 /* XON enable */
-#define IXGBE_FCRTH_FCEN	0x80000000 /* Packet buffer fc enable */
-
-/* PAP bit masks*/
-#define IXGBE_PAP_TXPAUSECNT_MASK	0x0000FFFF /* Pause counter mask */
-
-/* RMCS Bit Masks */
-#define IXGBE_RMCS_RRM			0x00000002 /* Rx Recycle Mode enable */
-/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
-#define IXGBE_RMCS_RAC			0x00000004
-/* Deficit Fixed Prio ena */
-#define IXGBE_RMCS_DFP			IXGBE_RMCS_RAC
-#define IXGBE_RMCS_TFCE_802_3X		0x00000008 /* Tx Priority FC ena */
-#define IXGBE_RMCS_TFCE_PRIORITY	0x00000010 /* Tx Priority FC ena */
-#define IXGBE_RMCS_ARBDIS		0x00000040 /* Arbitration disable bit */
-
-/* FCCFG Bit Masks */
-#define IXGBE_FCCFG_TFCE_802_3X		0x00000008 /* Tx link FC enable */
-#define IXGBE_FCCFG_TFCE_PRIORITY	0x00000010 /* Tx priority FC enable */
-
-/* Interrupt register bitmasks */
-
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE	0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_FLOW_DIR	0x00010000 /* FDir Exception */
-#define IXGBE_EICR_RX_MISS	0x00020000 /* Packet Buffer Overrun */
-#define IXGBE_EICR_PCI		0x00040000 /* PCI Exception */
-#define IXGBE_EICR_MAILBOX	0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_LSC		0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC	0x00200000 /* PN Threshold */
-#define IXGBE_EICR_MNG		0x00400000 /* Manageability Event Interrupt */
-#define IXGBE_EICR_TS		0x00800000 /* Thermal Sensor Event */
-#define IXGBE_EICR_TIMESYNC	0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0	0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1	0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2	0x04000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_ECC		0x10000000 /* ECC Error */
-#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
-#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
-					 IXGBE_EICR_GPI_SDP0_X540 : \
-					 IXGBE_EICR_GPI_SDP0)
-#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
-					 IXGBE_EICR_GPI_SDP1_X540 : \
-					 IXGBE_EICR_GPI_SDP1)
-#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)	((_hw)->mac.type >= ixgbe_mac_X540 ? \
-					 IXGBE_EICR_GPI_SDP2_X540 : \
-					 IXGBE_EICR_GPI_SDP2)
-#define IXGBE_EICR_PBUR		0x10000000 /* Packet Buffer Handler Error */
-#define IXGBE_EICR_DHER		0x20000000 /* Descriptor Handler Error */
-#define IXGBE_EICR_TCP_TIMER	0x40000000 /* TCP Timer */
-#define IXGBE_EICR_OTHER	0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_FLOW_DIR	IXGBE_EICR_FLOW_DIR  /* FDir Exception */
-#define IXGBE_EICS_RX_MISS	IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
-#define IXGBE_EICS_PCI		IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EICS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
-#define IXGBE_EICS_LSC		IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EICS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EICS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EICS_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2	IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_ECC		IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
-#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
-#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
-#define IXGBE_EICS_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EICS_DHER		IXGBE_EICR_DHER /* Desc Handler Error */
-#define IXGBE_EICS_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EICS_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_FLOW_DIR	IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMS_RX_MISS	IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMS_PCI		IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_LSC		IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMS_TS		IXGBE_EICR_TS /* Thermal Sensor Event */
-#define IXGBE_EIMS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMS_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2	IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_ECC		IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
-#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
-#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
-#define IXGBE_EIMS_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMS_DHER		IXGBE_EICR_DHER /* Descr Handler Error */
-#define IXGBE_EIMS_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMS_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE	IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_FLOW_DIR	IXGBE_EICR_FLOW_DIR /* FDir Exception */
-#define IXGBE_EIMC_RX_MISS	IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
-#define IXGBE_EIMC_PCI		IXGBE_EICR_PCI /* PCI Exception */
-#define IXGBE_EIMC_MAILBOX	IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_LSC		IXGBE_EICR_LSC /* Link Status Change */
-#define IXGBE_EIMC_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
-#define IXGBE_EIMC_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMC_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1	IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2	IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_ECC		IXGBE_EICR_ECC /* ECC Error */
-#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
-#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
-#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw)	IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
-#define IXGBE_EIMC_PBUR		IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
-#define IXGBE_EIMC_DHER		IXGBE_EICR_DHER /* Desc Handler Err */
-#define IXGBE_EIMC_TCP_TIMER	IXGBE_EICR_TCP_TIMER /* TCP Timer */
-#define IXGBE_EIMC_OTHER	IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
-				IXGBE_EIMS_RTX_QUEUE	| \
-				IXGBE_EIMS_LSC		| \
-				IXGBE_EIMS_TCP_TIMER	| \
-				IXGBE_EIMS_OTHER)
-
-/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define IXGBE_IMIR_PORT_IM_EN	0x00010000  /* TCP port enable */
-#define IXGBE_IMIR_PORT_BP	0x00020000  /* TCP port check bypass */
-#define IXGBE_IMIREXT_SIZE_BP	0x00001000  /* Packet size bypass */
-#define IXGBE_IMIREXT_CTRL_URG	0x00002000  /* Check URG bit in header */
-#define IXGBE_IMIREXT_CTRL_ACK	0x00004000  /* Check ACK bit in header */
-#define IXGBE_IMIREXT_CTRL_PSH	0x00008000  /* Check PSH bit in header */
-#define IXGBE_IMIREXT_CTRL_RST	0x00010000  /* Check RST bit in header */
-#define IXGBE_IMIREXT_CTRL_SYN	0x00020000  /* Check SYN bit in header */
-#define IXGBE_IMIREXT_CTRL_FIN	0x00040000  /* Check FIN bit in header */
-#define IXGBE_IMIREXT_CTRL_BP	0x00080000  /* Bypass check of control bits */
-#define IXGBE_IMIR_SIZE_BP_82599	0x00001000 /* Packet size bypass */
-#define IXGBE_IMIR_CTRL_URG_82599	0x00002000 /* Check URG bit in header */
-#define IXGBE_IMIR_CTRL_ACK_82599	0x00004000 /* Check ACK bit in header */
-#define IXGBE_IMIR_CTRL_PSH_82599	0x00008000 /* Check PSH bit in header */
-#define IXGBE_IMIR_CTRL_RST_82599	0x00010000 /* Check RST bit in header */
-#define IXGBE_IMIR_CTRL_SYN_82599	0x00020000 /* Check SYN bit in header */
-#define IXGBE_IMIR_CTRL_FIN_82599	0x00040000 /* Check FIN bit in header */
-#define IXGBE_IMIR_CTRL_BP_82599	0x00080000 /* Bypass chk of ctrl bits */
-#define IXGBE_IMIR_LLI_EN_82599		0x00100000 /* Enables low latency Int */
-#define IXGBE_IMIR_RX_QUEUE_MASK_82599	0x0000007F /* Rx Queue Mask */
-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599	21 /* Rx Queue Shift */
-#define IXGBE_IMIRVP_PRIORITY_MASK	0x00000007 /* VLAN priority mask */
-#define IXGBE_IMIRVP_PRIORITY_EN	0x00000008 /* VLAN priority enable */
-
-#define IXGBE_MAX_FTQF_FILTERS		128
-#define IXGBE_FTQF_PROTOCOL_MASK	0x00000003
-#define IXGBE_FTQF_PROTOCOL_TCP		0x00000000
-#define IXGBE_FTQF_PROTOCOL_UDP		0x00000001
-#define IXGBE_FTQF_PROTOCOL_SCTP	2
-#define IXGBE_FTQF_PRIORITY_MASK	0x00000007
-#define IXGBE_FTQF_PRIORITY_SHIFT	2
-#define IXGBE_FTQF_POOL_MASK		0x0000003F
-#define IXGBE_FTQF_POOL_SHIFT		8
-#define IXGBE_FTQF_5TUPLE_MASK_MASK	0x0000001F
-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT	25
-#define IXGBE_FTQF_SOURCE_ADDR_MASK	0x1E
-#define IXGBE_FTQF_DEST_ADDR_MASK	0x1D
-#define IXGBE_FTQF_SOURCE_PORT_MASK	0x1B
-#define IXGBE_FTQF_DEST_PORT_MASK	0x17
-#define IXGBE_FTQF_PROTOCOL_COMP_MASK	0x0F
-#define IXGBE_FTQF_POOL_MASK_EN		0x40000000
-#define IXGBE_FTQF_QUEUE_ENABLE		0x80000000
-
-/* Interrupt clear mask */
-#define IXGBE_IRQ_CLEAR_MASK	0xFFFFFFFF
-
-/* Interrupt Vector Allocation Registers */
-#define IXGBE_IVAR_REG_NUM		25
-#define IXGBE_IVAR_REG_NUM_82599	64
-#define IXGBE_IVAR_TXRX_ENTRY		96
-#define IXGBE_IVAR_RX_ENTRY		64
-#define IXGBE_IVAR_RX_QUEUE(_i)		(0 + (_i))
-#define IXGBE_IVAR_TX_QUEUE(_i)		(64 + (_i))
-#define IXGBE_IVAR_TX_ENTRY		32
-
-#define IXGBE_IVAR_TCP_TIMER_INDEX	96 /* 0 based index */
-#define IXGBE_IVAR_OTHER_CAUSES_INDEX	97 /* 0 based index */
-
-#define IXGBE_MSIX_VECTOR(_i)		(0 + (_i))
-
-#define IXGBE_IVAR_ALLOC_VAL		0x80 /* Interrupt Allocation valid */
-
-/* ETYPE Queue Filter/Select Bit Masks */
-#define IXGBE_MAX_ETQF_FILTERS		8
-#define IXGBE_ETQF_FCOE			0x08000000 /* bit 27 */
-#define IXGBE_ETQF_BCN			0x10000000 /* bit 28 */
-#define IXGBE_ETQF_TX_ANTISPOOF		0x20000000 /* bit 29 */
-#define IXGBE_ETQF_1588			0x40000000 /* bit 30 */
-#define IXGBE_ETQF_FILTER_EN		0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE		(1 << 26) /* bit 26 */
-#define IXGBE_ETQF_POOL_SHIFT		20
-
-#define IXGBE_ETQS_RX_QUEUE		0x007F0000 /* bits 22:16 */
-#define IXGBE_ETQS_RX_QUEUE_SHIFT	16
-#define IXGBE_ETQS_LLI			0x20000000 /* bit 29 */
-#define IXGBE_ETQS_QUEUE_EN		0x80000000 /* bit 31 */
-
-/*
- * ETQF filter list: one static filter per filter consumer. This is
- *		   to avoid filter collisions later. Add new filters
- *		   here!!
- *
- * Current filters:
- *	EAPOL 802.1x (0x888e): Filter 0
- *	FCoE (0x8906):	 Filter 2
- *	1588 (0x88f7):	 Filter 3
- *	FIP  (0x8914):	 Filter 4
- *	LLDP (0x88CC):	 Filter 5
- *	LACP (0x8809):	 Filter 6
- */
-#define IXGBE_ETQF_FILTER_EAPOL		0
-#define IXGBE_ETQF_FILTER_FCOE		2
-#define IXGBE_ETQF_FILTER_1588		3
-#define IXGBE_ETQF_FILTER_FIP		4
-#define IXGBE_ETQF_FILTER_LLDP		5
-#define IXGBE_ETQF_FILTER_LACP		6
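Editorial illustration only (not from this patch): the reserved slot indices above combine with the ETQF/ETQS bit masks when an EtherType filter is programmed. The hypothetical sketch below shows the 1588 case; the IXGBE_ETQF(_i)/IXGBE_ETQS(_i) register macros, IXGBE_WRITE_REG() and struct ixgbe_hw are assumed from elsewhere in the base driver headers.

/* Hypothetical sketch: steer IEEE 1588 (EtherType 0x88F7) frames to rx_queue
 * using the reserved IXGBE_ETQF_FILTER_1588 slot.
 */
static void example_enable_1588_filter(struct ixgbe_hw *hw, uint32_t rx_queue)
{
	uint32_t etqf = 0x88F7 | IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588;
	uint32_t etqs = ((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) &
			 IXGBE_ETQS_RX_QUEUE) | IXGBE_ETQS_QUEUE_EN;

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_1588), etqs);
}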
-/* VLAN Control Bit Masks */
-#define IXGBE_VLNCTRL_VET		0x0000FFFF  /* bits 0-15 */
-#define IXGBE_VLNCTRL_CFI		0x10000000  /* bit 28 */
-#define IXGBE_VLNCTRL_CFIEN		0x20000000  /* bit 29 */
-#define IXGBE_VLNCTRL_VFE		0x40000000  /* bit 30 */
-#define IXGBE_VLNCTRL_VME		0x80000000  /* bit 31 */
-
-/* VLAN pool filtering masks */
-#define IXGBE_VLVF_VIEN			0x80000000  /* filter is valid */
-#define IXGBE_VLVF_ENTRIES		64
-#define IXGBE_VLVF_VLANID_MASK		0x00000FFF
-/* Per VF Port VLAN insertion rules */
-#define IXGBE_VMVIR_VLANA_DEFAULT	0x40000000 /* Always use default VLAN */
-#define IXGBE_VMVIR_VLANA_NEVER		0x80000000 /* Never insert VLAN tag */
-
-#define IXGBE_ETHERNET_IEEE_VLAN_TYPE	0x8100  /* 802.1q protocol */
-
-/* STATUS Bit Masks */
-#define IXGBE_STATUS_LAN_ID		0x0000000C /* LAN ID */
-#define IXGBE_STATUS_LAN_ID_SHIFT	2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO		0x00080000 /* GIO Master Ena Status */
-
-#define IXGBE_STATUS_LAN_ID_0	0x00000000 /* LAN ID 0 */
-#define IXGBE_STATUS_LAN_ID_1	0x00000004 /* LAN ID 1 */
-
-/* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0		0x00000001 /* SDP0 Data Value */
-#define IXGBE_ESDP_SDP1		0x00000002 /* SDP1 Data Value */
-#define IXGBE_ESDP_SDP2		0x00000004 /* SDP2 Data Value */
-#define IXGBE_ESDP_SDP3		0x00000008 /* SDP3 Data Value */
-#define IXGBE_ESDP_SDP4		0x00000010 /* SDP4 Data Value */
-#define IXGBE_ESDP_SDP5		0x00000020 /* SDP5 Data Value */
-#define IXGBE_ESDP_SDP6		0x00000040 /* SDP6 Data Value */
-#define IXGBE_ESDP_SDP7		0x00000080 /* SDP7 Data Value */
-#define IXGBE_ESDP_SDP0_DIR	0x00000100 /* SDP0 IO direction */
-#define IXGBE_ESDP_SDP1_DIR	0x00000200 /* SDP1 IO direction */
-#define IXGBE_ESDP_SDP2_DIR	0x00000400 /* SDP2 IO direction */
-#define IXGBE_ESDP_SDP3_DIR	0x00000800 /* SDP3 IO direction */
-#define IXGBE_ESDP_SDP4_DIR	0x00001000 /* SDP4 IO direction */
-#define IXGBE_ESDP_SDP5_DIR	0x00002000 /* SDP5 IO direction */
-#define IXGBE_ESDP_SDP6_DIR	0x00004000 /* SDP6 IO direction */
-#define IXGBE_ESDP_SDP7_DIR	0x00008000 /* SDP7 IO direction */
-#define IXGBE_ESDP_SDP0_NATIVE	0x00010000 /* SDP0 IO mode */
-#define IXGBE_ESDP_SDP1_NATIVE	0x00020000 /* SDP1 IO mode */
-
-
-/* LEDCTL Bit Masks */
-#define IXGBE_LED_IVRT_BASE		0x00000040
-#define IXGBE_LED_BLINK_BASE		0x00000080
-#define IXGBE_LED_MODE_MASK_BASE	0x0000000F
-#define IXGBE_LED_OFFSET(_base, _i)	(_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i)	(8*(_i))
-#define IXGBE_LED_IVRT(_i)	IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
-#define IXGBE_LED_BLINK(_i)	IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
-#define IXGBE_LED_MODE_MASK(_i)	IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
-
-/* LED modes */
-#define IXGBE_LED_LINK_UP	0x0
-#define IXGBE_LED_LINK_10G	0x1
-#define IXGBE_LED_MAC		0x2
-#define IXGBE_LED_FILTER	0x3
-#define IXGBE_LED_LINK_ACTIVE	0x4
-#define IXGBE_LED_LINK_1G	0x5
-#define IXGBE_LED_ON		0xE
-#define IXGBE_LED_OFF		0xF
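
/*
 * Illustrative sketch of the per-LED macros above: each LED owns an 8-bit
 * control field at bits 8*_i of LEDCTL, so forcing one LED on is a
 * read-modify-write of that byte.  IXGBE_READ_REG()/IXGBE_WRITE_REG() and
 * the IXGBE_LEDCTL offset are assumed from elsewhere in the base driver.
 */
static void example_led_force_on(struct ixgbe_hw *hw, u32 index)
{
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	led_reg &= ~IXGBE_LED_MODE_MASK(index);		/* clear current mode */
	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	led_reg &= ~IXGBE_LED_BLINK(index);		/* and stop blinking */
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
}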
-
-/* AUTOC Bit Masks */
-#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
-#define IXGBE_AUTOC_KX4_SUPP	0x80000000
-#define IXGBE_AUTOC_KX_SUPP	0x40000000
-#define IXGBE_AUTOC_PAUSE	0x30000000
-#define IXGBE_AUTOC_ASM_PAUSE	0x20000000
-#define IXGBE_AUTOC_SYM_PAUSE	0x10000000
-#define IXGBE_AUTOC_RF		0x08000000
-#define IXGBE_AUTOC_PD_TMR	0x06000000
-#define IXGBE_AUTOC_AN_RX_LOOSE	0x01000000
-#define IXGBE_AUTOC_AN_RX_DRIFT	0x00800000
-#define IXGBE_AUTOC_AN_RX_ALIGN	0x007C0000
-#define IXGBE_AUTOC_FECA	0x00040000
-#define IXGBE_AUTOC_FECR	0x00020000
-#define IXGBE_AUTOC_KR_SUPP	0x00010000
-#define IXGBE_AUTOC_AN_RESTART	0x00001000
-#define IXGBE_AUTOC_FLU		0x00000001
-#define IXGBE_AUTOC_LMS_SHIFT	13
-#define IXGBE_AUTOC_LMS_10G_SERIAL	(0x3 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR	(0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_SGMII_1G_100M	(0x5 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN	(0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII	(0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_MASK		(0x7 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN	(0x0 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN	(0x1 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_1G_AN		(0x2 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN		(0x4 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN	(0x6 << IXGBE_AUTOC_LMS_SHIFT)
-#define IXGBE_AUTOC_LMS_ATTACH_TYPE	(0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC_1G_PMA_PMD_MASK	0x00000200
-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT	9
-#define IXGBE_AUTOC_10G_PMA_PMD_MASK	0x00000180
-#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT	7
-#define IXGBE_AUTOC_10G_XAUI	(0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4	(0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4	(0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX	(0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX	(0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI	(0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX	(0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-
-#define IXGBE_AUTOC2_UPPER_MASK	0xFFFF0000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK	0x00030000
-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT	16
-#define IXGBE_AUTOC2_10G_KR	(0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI	(0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI	(0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK	0x50000000
-#define IXGBE_AUTOC2_LINK_DISABLE_MASK		0x70000000
-
-#define IXGBE_MACC_FLU		0x00000001
-#define IXGBE_MACC_FSV_10G	0x00030000
-#define IXGBE_MACC_FS		0x00040000
-#define IXGBE_MAC_RX2TX_LPBK	0x00000002
-
-/* Veto Bit definition */
-#define IXGBE_MMNGC_MNG_VETO	0x00000001
-
-/* LINKS Bit Masks */
-#define IXGBE_LINKS_KX_AN_COMP	0x80000000
-#define IXGBE_LINKS_UP		0x40000000
-#define IXGBE_LINKS_SPEED	0x20000000
-#define IXGBE_LINKS_MODE	0x18000000
-#define IXGBE_LINKS_RX_MODE	0x06000000
-#define IXGBE_LINKS_TX_MODE	0x01800000
-#define IXGBE_LINKS_XGXS_EN	0x00400000
-#define IXGBE_LINKS_SGMII_EN	0x02000000
-#define IXGBE_LINKS_PCS_1G_EN	0x00200000
-#define IXGBE_LINKS_1G_AN_EN	0x00100000
-#define IXGBE_LINKS_KX_AN_IDLE	0x00080000
-#define IXGBE_LINKS_1G_SYNC	0x00040000
-#define IXGBE_LINKS_10G_ALIGN	0x00020000
-#define IXGBE_LINKS_10G_LANE_SYNC	0x00017000
-#define IXGBE_LINKS_TL_FAULT		0x00001000
-#define IXGBE_LINKS_SIGNAL		0x00000F00
-
-#define IXGBE_LINKS_SPEED_NON_STD	0x08000000
-#define IXGBE_LINKS_SPEED_82599		0x30000000
-#define IXGBE_LINKS_SPEED_10G_82599	0x30000000
-#define IXGBE_LINKS_SPEED_1G_82599	0x20000000
-#define IXGBE_LINKS_SPEED_100_82599	0x10000000
-#define IXGBE_LINK_UP_TIME		90 /* 9.0 Seconds */
-#define IXGBE_AUTO_NEG_TIME		45 /* 4.5 Seconds */
-
-#define IXGBE_LINKS2_AN_SUPPORTED	0x00000040
-
-/* PCS1GLSTA Bit Masks */
-#define IXGBE_PCS1GLSTA_LINK_OK		1
-#define IXGBE_PCS1GLSTA_SYNK_OK		0x10
-#define IXGBE_PCS1GLSTA_AN_COMPLETE	0x10000
-#define IXGBE_PCS1GLSTA_AN_PAGE_RX	0x20000
-#define IXGBE_PCS1GLSTA_AN_TIMED_OUT	0x40000
-#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT	0x80000
-#define IXGBE_PCS1GLSTA_AN_ERROR_RWS	0x100000
-
-#define IXGBE_PCS1GANA_SYM_PAUSE	0x80
-#define IXGBE_PCS1GANA_ASM_PAUSE	0x100
-
-/* PCS1GLCTL Bit Masks */
-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */
-#define IXGBE_PCS1GLCTL_FLV_LINK_UP	1
-#define IXGBE_PCS1GLCTL_FORCE_LINK	0x20
-#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH	0x40
-#define IXGBE_PCS1GLCTL_AN_ENABLE	0x10000
-#define IXGBE_PCS1GLCTL_AN_RESTART	0x20000
-
-/* ANLP1 Bit Masks */
-#define IXGBE_ANLP1_PAUSE		0x0C00
-#define IXGBE_ANLP1_SYM_PAUSE		0x0400
-#define IXGBE_ANLP1_ASM_PAUSE		0x0800
-#define IXGBE_ANLP1_AN_STATE_MASK	0x000f0000
-
-/* SW Semaphore Register bitmasks */
-#define IXGBE_SWSM_SMBI		0x00000001 /* Driver Semaphore bit */
-#define IXGBE_SWSM_SWESMBI	0x00000002 /* FW Semaphore bit */
-#define IXGBE_SWSM_WMNG		0x00000004 /* Wake MNG Clock */
-#define IXGBE_SWFW_REGSMP	0x80000000 /* Register Semaphore bit 31 */
-
-/* SW_FW_SYNC/GSSR definitions */
-#define IXGBE_GSSR_EEP_SM		0x0001
-#define IXGBE_GSSR_PHY0_SM		0x0002
-#define IXGBE_GSSR_PHY1_SM		0x0004
-#define IXGBE_GSSR_MAC_CSR_SM		0x0008
-#define IXGBE_GSSR_FLASH_SM		0x0010
-#define IXGBE_GSSR_NVM_UPDATE_SM	0x0200
-#define IXGBE_GSSR_SW_MNG_SM		0x0400
-#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
-#define IXGBE_GSSR_I2C_MASK	0x1800
-#define IXGBE_GSSR_NVM_PHY_MASK	0xF
-
-/* FW Status register bitmask */
-#define IXGBE_FWSTS_FWRI	0x00000200 /* Firmware Reset Indication */
-
-/* EEC Register */
-#define IXGBE_EEC_SK		0x00000001 /* EEPROM Clock */
-#define IXGBE_EEC_CS		0x00000002 /* EEPROM Chip Select */
-#define IXGBE_EEC_DI		0x00000004 /* EEPROM Data In */
-#define IXGBE_EEC_DO		0x00000008 /* EEPROM Data Out */
-#define IXGBE_EEC_FWE_MASK	0x00000030 /* FLASH Write Enable */
-#define IXGBE_EEC_FWE_DIS	0x00000010 /* Disable FLASH writes */
-#define IXGBE_EEC_FWE_EN	0x00000020 /* Enable FLASH writes */
-#define IXGBE_EEC_FWE_SHIFT	4
-#define IXGBE_EEC_REQ		0x00000040 /* EEPROM Access Request */
-#define IXGBE_EEC_GNT		0x00000080 /* EEPROM Access Grant */
-#define IXGBE_EEC_PRES		0x00000100 /* EEPROM Present */
-#define IXGBE_EEC_ARD		0x00000200 /* EEPROM Auto Read Done */
-#define IXGBE_EEC_FLUP		0x00800000 /* Flash update command */
-#define IXGBE_EEC_SEC1VAL	0x02000000 /* Sector 1 Valid */
-#define IXGBE_EEC_FLUDONE	0x04000000 /* Flash update done */
-/* EEPROM Addressing bits based on type (0-small, 1-large) */
-#define IXGBE_EEC_ADDR_SIZE	0x00000400
-#define IXGBE_EEC_SIZE		0x00007800 /* EEPROM Size */
-#define IXGBE_EERD_MAX_ADDR	0x00003FFF /* EERD allows 14 bits for addr. */
-
-#define IXGBE_EEC_SIZE_SHIFT		11
-#define IXGBE_EEPROM_WORD_SIZE_SHIFT	6
-#define IXGBE_EEPROM_OPCODE_BITS	8
-
-/* FLA Register */
-#define IXGBE_FLA_LOCKED	0x00000040
-
-/* Part Number String Length */
-#define IXGBE_PBANUM_LENGTH	11
-
-/* Checksum and EEPROM pointers */
-#define IXGBE_PBANUM_PTR_GUARD		0xFAFA
-#define IXGBE_EEPROM_CHECKSUM		0x3F
-#define IXGBE_EEPROM_SUM		0xBABA
-#define IXGBE_PCIE_ANALOG_PTR		0x03
-#define IXGBE_ATLAS0_CONFIG_PTR		0x04
-#define IXGBE_PHY_PTR			0x04
-#define IXGBE_ATLAS1_CONFIG_PTR		0x05
-#define IXGBE_OPTION_ROM_PTR		0x05
-#define IXGBE_PCIE_GENERAL_PTR		0x06
-#define IXGBE_PCIE_CONFIG0_PTR		0x07
-#define IXGBE_PCIE_CONFIG1_PTR		0x08
-#define IXGBE_CORE0_PTR			0x09
-#define IXGBE_CORE1_PTR			0x0A
-#define IXGBE_MAC0_PTR			0x0B
-#define IXGBE_MAC1_PTR			0x0C
-#define IXGBE_CSR0_CONFIG_PTR		0x0D
-#define IXGBE_CSR1_CONFIG_PTR		0x0E
-#define IXGBE_PCIE_ANALOG_PTR_X550	0x02
-#define IXGBE_SHADOW_RAM_SIZE_X550	0x4000
-#define IXGBE_IXGBE_PCIE_GENERAL_SIZE	0x24
-#define IXGBE_PCIE_CONFIG_SIZE		0x08
-#define IXGBE_EEPROM_LAST_WORD		0x41
-#define IXGBE_FW_PTR			0x0F
-#define IXGBE_PBANUM0_PTR		0x15
-#define IXGBE_PBANUM1_PTR		0x16
-#define IXGBE_ALT_MAC_ADDR_PTR		0x37
-#define IXGBE_FREE_SPACE_PTR		0x3E
-
-/* External Thermal Sensor Config */
-#define IXGBE_ETS_CFG			0x26
-#define IXGBE_ETS_LTHRES_DELTA_MASK	0x07C0
-#define IXGBE_ETS_LTHRES_DELTA_SHIFT	6
-#define IXGBE_ETS_TYPE_MASK		0x0038
-#define IXGBE_ETS_TYPE_SHIFT		3
-#define IXGBE_ETS_TYPE_EMC		0x000
-#define IXGBE_ETS_NUM_SENSORS_MASK	0x0007
-#define IXGBE_ETS_DATA_LOC_MASK		0x3C00
-#define IXGBE_ETS_DATA_LOC_SHIFT	10
-#define IXGBE_ETS_DATA_INDEX_MASK	0x0300
-#define IXGBE_ETS_DATA_INDEX_SHIFT	8
-#define IXGBE_ETS_DATA_HTHRESH_MASK	0x00FF
-
-#define IXGBE_SAN_MAC_ADDR_PTR		0x28
-#define IXGBE_DEVICE_CAPS		0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR	0x11
-#define IXGBE_PCIE_MSIX_82599_CAPS	0x72
-#define IXGBE_MAX_MSIX_VECTORS_82599	0x40
-#define IXGBE_PCIE_MSIX_82598_CAPS	0x62
-#define IXGBE_MAX_MSIX_VECTORS_82598	0x13
-
-/* MSI-X capability fields masks */
-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK	0x7FF
-
-/* Legacy EEPROM word offsets */
-#define IXGBE_ISCSI_BOOT_CAPS		0x0033
-#define IXGBE_ISCSI_SETUP_PORT_0	0x0030
-#define IXGBE_ISCSI_SETUP_PORT_1	0x0034
-
-/* EEPROM Commands - SPI */
-#define IXGBE_EEPROM_MAX_RETRY_SPI	5000 /* Max wait 5ms for RDY signal */
-#define IXGBE_EEPROM_STATUS_RDY_SPI	0x01
-#define IXGBE_EEPROM_READ_OPCODE_SPI	0x03  /* EEPROM read opcode */
-#define IXGBE_EEPROM_WRITE_OPCODE_SPI	0x02  /* EEPROM write opcode */
-#define IXGBE_EEPROM_A8_OPCODE_SPI	0x08  /* opcode bit-3 = addr bit-8 */
-#define IXGBE_EEPROM_WREN_OPCODE_SPI	0x06  /* EEPROM set Write Ena latch */
-/* EEPROM reset Write Enable latch */
-#define IXGBE_EEPROM_WRDI_OPCODE_SPI	0x04
-#define IXGBE_EEPROM_RDSR_OPCODE_SPI	0x05  /* EEPROM read Status reg */
-#define IXGBE_EEPROM_WRSR_OPCODE_SPI	0x01  /* EEPROM write Status reg */
-#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI	0x20  /* EEPROM ERASE 4KB */
-#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI	0xD8  /* EEPROM ERASE 64KB */
-#define IXGBE_EEPROM_ERASE256_OPCODE_SPI	0xDB  /* EEPROM ERASE 256B */
-
-/* EEPROM Read Register */
-#define IXGBE_EEPROM_RW_REG_DATA	16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_RW_REG_DONE	2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_RW_REG_START	1 /* First bit to start operation */
-#define IXGBE_EEPROM_RW_ADDR_SHIFT	2 /* Shift to the address bits */
-#define IXGBE_NVM_POLL_WRITE		1 /* Flag for polling for wr complete */
-#define IXGBE_NVM_POLL_READ		0 /* Flag for polling for rd complete */
-
-#define IXGBE_ETH_LENGTH_OF_ADDRESS	6
-
-#define IXGBE_EEPROM_PAGE_SIZE_MAX	128
-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT	256 /* words rd in burst */
-#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT	256 /* words wr in burst */
-#define IXGBE_EEPROM_CTRL_2		1 /* EEPROM CTRL word 2 */
-#define IXGBE_EEPROM_CCD_BIT		2
-
-#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
-#define IXGBE_EEPROM_GRANT_ATTEMPTS	1000 /* EEPROM attempts to gain grant */
-#endif
-
-/* Number of 5 microsecond intervals we wait for an EERD read and
- * an EEWR write to complete */
-#define IXGBE_EERD_EEWR_ATTEMPTS	100000
-
-/* Number of attempts we wait for a flash update to complete */
-#define IXGBE_FLUDONE_ATTEMPTS		20000
-
-#define IXGBE_PCIE_CTRL2		0x5   /* PCIe Control 2 Offset */
-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE	0x8   /* Dummy Function Enable */
-#define IXGBE_PCIE_CTRL2_LAN_DISABLE	0x2   /* LAN PCI Disable */
-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT	0x1   /* LAN Disable Select */
-
-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET		0x0
-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET		0x3
-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP		0x1
-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS		0x2
-#define IXGBE_FW_LESM_PARAMETERS_PTR		0x2
-#define IXGBE_FW_LESM_STATE_1			0x1
-#define IXGBE_FW_LESM_STATE_ENABLED		0x8000 /* LESM Enable bit */
-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR	0x4
-#define IXGBE_FW_PATCH_VERSION_4		0x7
-#define IXGBE_FCOE_IBA_CAPS_BLK_PTR		0x33 /* iSCSI/FCOE block */
-#define IXGBE_FCOE_IBA_CAPS_FCOE		0x20 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_BLK_PTR		0x17 /* iSCSI/FCOE block */
-#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET		0x0 /* FCOE flags */
-#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE		0x1 /* FCOE flags enable bit */
-#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR		0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET	0x0 /* Alt SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET	0x1 /* Alt SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET	0x4 /* Alt SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET	0x7 /* Alt WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET	0x8 /* Alt WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC	0x0 /* Alt SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN	0x1 /* Alt WWN base exists */
-
-/* FW header offset */
-#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR	0x4
-#define IXGBE_X540_FW_MODULE_MASK			0x7FFF
-/* 4KB multiplier */
-#define IXGBE_X540_FW_MODULE_LENGTH			0x1000
-/* version word 2 (month & day) */
-#define IXGBE_X540_FW_PATCH_VERSION_2		0x5
-/* version word 3 (silicon compatibility & year) */
-#define IXGBE_X540_FW_PATCH_VERSION_3		0x6
-/* version word 4 (major & minor numbers) */
-#define IXGBE_X540_FW_PATCH_VERSION_4		0x7
-
-#define IXGBE_DEVICE_CAPS_WOL_PORT0_1	0x4 /* WoL supported on ports 0 & 1 */
-#define IXGBE_DEVICE_CAPS_WOL_PORT0	0x8 /* WoL supported on port 0 */
-#define IXGBE_DEVICE_CAPS_WOL_MASK	0xC /* Mask for WoL capabilities */
-
-/* PCI Bus Info */
-#define IXGBE_PCI_DEVICE_STATUS		0xAA
-#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING	0x0020
-#define IXGBE_PCI_LINK_STATUS		0xB2
-#define IXGBE_PCI_DEVICE_CONTROL2	0xC8
-#define IXGBE_PCI_LINK_WIDTH		0x3F0
-#define IXGBE_PCI_LINK_WIDTH_1		0x10
-#define IXGBE_PCI_LINK_WIDTH_2		0x20
-#define IXGBE_PCI_LINK_WIDTH_4		0x40
-#define IXGBE_PCI_LINK_WIDTH_8		0x80
-#define IXGBE_PCI_LINK_SPEED		0xF
-#define IXGBE_PCI_LINK_SPEED_2500	0x1
-#define IXGBE_PCI_LINK_SPEED_5000	0x2
-#define IXGBE_PCI_LINK_SPEED_8000	0x3
-#define IXGBE_PCI_HEADER_TYPE_REGISTER	0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC	0x80
-#define IXGBE_PCI_DEVICE_CONTROL2_16ms	0x0005
-
-#define IXGBE_PCIDEVCTRL2_TIMEO_MASK	0xf
-#define IXGBE_PCIDEVCTRL2_16_32ms_def	0x0
-#define IXGBE_PCIDEVCTRL2_50_100us	0x1
-#define IXGBE_PCIDEVCTRL2_1_2ms		0x2
-#define IXGBE_PCIDEVCTRL2_16_32ms	0x5
-#define IXGBE_PCIDEVCTRL2_65_130ms	0x6
-#define IXGBE_PCIDEVCTRL2_260_520ms	0x9
-#define IXGBE_PCIDEVCTRL2_1_2s		0xa
-#define IXGBE_PCIDEVCTRL2_4_8s		0xd
-#define IXGBE_PCIDEVCTRL2_17_34s	0xe
-
-/* Number of 100 microsecond intervals we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT	800
-
-/* Check whether an address is multicast. This is a little-endian-specific check. */
-#define IXGBE_IS_MULTICAST(Address) \
-		(bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address) \
-		((((u8 *)(Address))[0] == ((u8)0xff)) && \
-		(((u8 *)(Address))[1] == ((u8)0xff)))
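
/*
 * Usage sketch for the two address checks above; the multicast test simply
 * reads the I/G bit in the first byte of the destination MAC.
 */
static const char *example_classify_mac(const u8 *mac)
{
	if (IXGBE_IS_BROADCAST(mac))
		return "broadcast";
	if (IXGBE_IS_MULTICAST(mac))
		return "multicast";
	return "unicast";
}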
-
-/* RAH */
-#define IXGBE_RAH_VIND_MASK	0x003C0000
-#define IXGBE_RAH_VIND_SHIFT	18
-#define IXGBE_RAH_AV		0x80000000
-#define IXGBE_CLEAR_VMDQ_ALL	0xFFFFFFFF
-
-/* Header split receive */
-#define IXGBE_RFCTL_ISCSI_DIS		0x00000001
-#define IXGBE_RFCTL_ISCSI_DWC_MASK	0x0000003E
-#define IXGBE_RFCTL_ISCSI_DWC_SHIFT	1
-#define IXGBE_RFCTL_RSC_DIS		0x00000020
-#define IXGBE_RFCTL_NFSW_DIS		0x00000040
-#define IXGBE_RFCTL_NFSR_DIS		0x00000080
-#define IXGBE_RFCTL_NFS_VER_MASK	0x00000300
-#define IXGBE_RFCTL_NFS_VER_SHIFT	8
-#define IXGBE_RFCTL_NFS_VER_2		0
-#define IXGBE_RFCTL_NFS_VER_3		1
-#define IXGBE_RFCTL_NFS_VER_4		2
-#define IXGBE_RFCTL_IPV6_DIS		0x00000400
-#define IXGBE_RFCTL_IPV6_XSUM_DIS	0x00000800
-#define IXGBE_RFCTL_IPFRSP_DIS		0x00004000
-#define IXGBE_RFCTL_IPV6_EX_DIS		0x00010000
-#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS	0x00020000
-
-/* Transmit Config masks */
-#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
-#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
-#define IXGBE_TXDCTL_WTHRESH_SHIFT	16 /* shift to WTHRESH bits */
-/* Enable short packet padding to 64 bytes */
-#define IXGBE_TX_PAD_ENABLE		0x00000400
-#define IXGBE_JUMBO_FRAME_ENABLE	0x00000004  /* Allow jumbo frames */
-/* This allows for 16K packets + 4K for VLAN */
-#define IXGBE_MAX_FRAME_SZ		0x40040000
-
-#define IXGBE_TDWBAL_HEAD_WB_ENABLE	0x1 /* Tx head write-back enable */
-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE	0x2 /* Tx seq# write-back enable */
-
-/* Receive Config masks */
-#define IXGBE_RXCTRL_RXEN		0x00000001 /* Enable Receiver */
-#define IXGBE_RXCTRL_DMBYPS		0x00000002 /* Desc Monitor Bypass */
-#define IXGBE_RXDCTL_ENABLE		0x02000000 /* Ena specific Rx Queue */
-#define IXGBE_RXDCTL_SWFLSH		0x04000000 /* Rx Desc wr-bk flushing */
-#define IXGBE_RXDCTL_RLPMLMASK		0x00003FFF /* X540 supported only */
-#define IXGBE_RXDCTL_RLPML_EN		0x00008000
-#define IXGBE_RXDCTL_VME		0x40000000 /* VLAN mode enable */
-
-#define IXGBE_TSAUXC_EN_CLK		0x00000004
-#define IXGBE_TSAUXC_SYNCLK		0x00000008
-#define IXGBE_TSAUXC_SDP0_INT		0x00000040
-#define IXGBE_TSAUXC_EN_TT0		0x00000001
-#define IXGBE_TSAUXC_EN_TT1		0x00000002
-#define IXGBE_TSAUXC_ST0		0x00000010
-#define IXGBE_TSAUXC_DISABLE_SYSTIME	0x80000000
-
-#define IXGBE_TSSDP_TS_SDP0_SEL_MASK	0x000000C0
-#define IXGBE_TSSDP_TS_SDP0_CLK0	0x00000080
-#define IXGBE_TSSDP_TS_SDP0_EN		0x00000100
-
-#define IXGBE_TSYNCTXCTL_VALID		0x00000001 /* Tx timestamp valid */
-#define IXGBE_TSYNCTXCTL_ENABLED	0x00000010 /* Tx timestamping enabled */
-
-#define IXGBE_TSYNCRXCTL_VALID		0x00000001 /* Rx timestamp valid */
-#define IXGBE_TSYNCRXCTL_TYPE_MASK	0x0000000E /* Rx type mask */
-#define IXGBE_TSYNCRXCTL_TYPE_L2_V2	0x00
-#define IXGBE_TSYNCRXCTL_TYPE_L4_V1	0x02
-#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2	0x04
-#define IXGBE_TSYNCRXCTL_TYPE_ALL	0x08
-#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2	0x0A
-#define IXGBE_TSYNCRXCTL_ENABLED	0x00000010 /* Rx Timestamping enabled */
-#define IXGBE_TSYNCRXCTL_TSIP_UT_EN	0x00800000 /* Rx Timestamp in Packet */
-#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK	0xFF000000 /* Rx Timestamp UP Mask */
-
-#define IXGBE_TSIM_SYS_WRAP		0x00000001
-#define IXGBE_TSIM_TXTS			0x00000002
-#define IXGBE_TSIM_TADJ			0x00000080
-
-#define IXGBE_TSICR_SYS_WRAP		IXGBE_TSIM_SYS_WRAP
-#define IXGBE_TSICR_TXTS		IXGBE_TSIM_TXTS
-#define IXGBE_TSICR_TADJ		IXGBE_TSIM_TADJ
-
-#define IXGBE_RXMTRL_V1_CTRLT_MASK	0x000000FF
-#define IXGBE_RXMTRL_V1_SYNC_MSG	0x00
-#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG	0x01
-#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG	0x02
-#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG	0x03
-#define IXGBE_RXMTRL_V1_MGMT_MSG	0x04
-
-#define IXGBE_RXMTRL_V2_MSGID_MASK	0x0000FF00
-#define IXGBE_RXMTRL_V2_SYNC_MSG	0x0000
-#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG	0x0100
-#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG	0x0200
-#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG	0x0300
-#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG	0x0800
-#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG	0x0900
-#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
-#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG	0x0B00
-#define IXGBE_RXMTRL_V2_SIGNALLING_MSG	0x0C00
-#define IXGBE_RXMTRL_V2_MGMT_MSG	0x0D00
-
-#define IXGBE_FCTRL_SBP		0x00000002 /* Store Bad Packet */
-#define IXGBE_FCTRL_MPE		0x00000100 /* Multicast Promiscuous Ena*/
-#define IXGBE_FCTRL_UPE		0x00000200 /* Unicast Promiscuous Ena */
-#define IXGBE_FCTRL_BAM		0x00000400 /* Broadcast Accept Mode */
-#define IXGBE_FCTRL_PMCF	0x00001000 /* Pass MAC Control Frames */
-#define IXGBE_FCTRL_DPF		0x00002000 /* Discard Pause Frame */
-/* Receive Priority Flow Control Enable */
-#define IXGBE_FCTRL_RPFCE	0x00004000
-#define IXGBE_FCTRL_RFCE	0x00008000 /* Receive Flow Control Ena */
-#define IXGBE_MFLCN_PMCF	0x00000001 /* Pass MAC Control Frames */
-#define IXGBE_MFLCN_DPF		0x00000002 /* Discard Pause Frame */
-#define IXGBE_MFLCN_RPFCE	0x00000004 /* Receive Priority FC Enable */
-#define IXGBE_MFLCN_RFCE	0x00000008 /* Receive FC Enable */
-#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF4 /* Rx Priority FC bitmap mask */
-#define IXGBE_MFLCN_RPFCE_SHIFT	4 /* Rx Priority FC bitmap shift */
-
-/* Multiple Receive Queue Control */
-#define IXGBE_MRQC_RSSEN	0x00000001  /* RSS Enable */
-#define IXGBE_MRQC_MRQE_MASK	0xF /* Bits 3:0 */
-#define IXGBE_MRQC_RT8TCEN	0x00000002 /* 8 TC no RSS */
-#define IXGBE_MRQC_RT4TCEN	0x00000003 /* 4 TC no RSS */
-#define IXGBE_MRQC_RTRSS8TCEN	0x00000004 /* 8 TC w/ RSS */
-#define IXGBE_MRQC_RTRSS4TCEN	0x00000005 /* 4 TC w/ RSS */
-#define IXGBE_MRQC_VMDQEN	0x00000008 /* VMDq2 64 pools no RSS */
-#define IXGBE_MRQC_VMDQRSS32EN	0x0000000A /* VMDq2 32 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRSS64EN	0x0000000B /* VMDq2 64 pools w/ RSS */
-#define IXGBE_MRQC_VMDQRT8TCEN	0x0000000C /* VMDq2/RT 16 pool 8 TC */
-#define IXGBE_MRQC_VMDQRT4TCEN	0x0000000D /* VMDq2/RT 32 pool 4 TC */
-#define IXGBE_MRQC_RSS_FIELD_MASK	0xFFFF0000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP	0x00010000
-#define IXGBE_MRQC_RSS_FIELD_IPV4	0x00020000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX	0x00080000
-#define IXGBE_MRQC_RSS_FIELD_IPV6	0x00100000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP	0x00200000
-#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
-#define IXGBE_MRQC_MULTIPLE_RSS		0x00002000
-#define IXGBE_MRQC_L3L4TXSWEN		0x00008000
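
/*
 * Sketch of a plain RSS configuration built from the MRQC fields above:
 * RSS is enabled in the low nibble and the header fields that feed the
 * hash are selected in the upper half of the register.
 */
static u32 example_mrqc_rss_ipv4(void)
{
	return IXGBE_MRQC_RSSEN |
	       IXGBE_MRQC_RSS_FIELD_IPV4 |
	       IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	       IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
}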
-
-/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE	0x00000001
-#define IXGBE_QDE_HIDE_VLAN	0x00000002
-#define IXGBE_QDE_IDX_MASK	0x00007F00
-#define IXGBE_QDE_IDX_SHIFT	8
-#define IXGBE_QDE_WRITE		0x00010000
-#define IXGBE_QDE_READ		0x00020000
-
-#define IXGBE_TXD_POPTS_IXSM	0x01 /* Insert IP checksum */
-#define IXGBE_TXD_POPTS_TXSM	0x02 /* Insert TCP/UDP checksum */
-#define IXGBE_TXD_CMD_EOP	0x01000000 /* End of Packet */
-#define IXGBE_TXD_CMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
-#define IXGBE_TXD_CMD_IC	0x04000000 /* Insert Checksum */
-#define IXGBE_TXD_CMD_RS	0x08000000 /* Report Status */
-#define IXGBE_TXD_CMD_DEXT	0x20000000 /* Desc extension (0 = legacy) */
-#define IXGBE_TXD_CMD_VLE	0x40000000 /* Add VLAN tag */
-#define IXGBE_TXD_STAT_DD	0x00000001 /* Descriptor Done */
-
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP		0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH	0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED	0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK	0x18000000
-/* Multiple Transmit Queue Command Register */
-#define IXGBE_MTQC_RT_ENA	0x1 /* DCB Enable */
-#define IXGBE_MTQC_VT_ENA	0x2 /* VMDQ2 Enable */
-#define IXGBE_MTQC_64Q_1PB	0x0 /* 64 queues, 1 packet buffer */
-#define IXGBE_MTQC_32VF		0x8 /* 4 TX Queues per pool w/32 VFs */
-#define IXGBE_MTQC_64VF		0x4 /* 2 TX Queues per pool w/64 VFs */
-#define IXGBE_MTQC_4TC_4TQ	0x8 /* 4 TC if RT_ENA and VT_ENA */
-#define IXGBE_MTQC_8TC_8TQ	0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
-
-/* Receive Descriptor bit definitions */
-#define IXGBE_RXD_STAT_DD	0x01 /* Descriptor Done */
-#define IXGBE_RXD_STAT_EOP	0x02 /* End of Packet */
-#define IXGBE_RXD_STAT_FLM	0x04 /* FDir Match */
-#define IXGBE_RXD_STAT_VP	0x08 /* IEEE VLAN Packet */
-#define IXGBE_RXDADV_NEXTP_MASK	0x000FFFF0 /* Next Descriptor Index */
-#define IXGBE_RXDADV_NEXTP_SHIFT	0x00000004
-#define IXGBE_RXD_STAT_UDPCS	0x10 /* UDP xsum calculated */
-#define IXGBE_RXD_STAT_L4CS	0x20 /* L4 xsum calculated */
-#define IXGBE_RXD_STAT_IPCS	0x40 /* IP xsum calculated */
-#define IXGBE_RXD_STAT_PIF	0x80 /* passed in-exact filter */
-#define IXGBE_RXD_STAT_CRCV	0x100 /* Speculative CRC Valid */
-#define IXGBE_RXD_STAT_OUTERIPCS	0x100 /* Cloud IP xsum calculated */
-#define IXGBE_RXD_STAT_VEXT	0x200 /* 1st VLAN found */
-#define IXGBE_RXD_STAT_UDPV	0x400 /* Valid UDP checksum */
-#define IXGBE_RXD_STAT_DYNINT	0x800 /* Pkt caused INT via DYNINT */
-#define IXGBE_RXD_STAT_LLINT	0x800 /* Pkt caused Low Latency Interrupt */
-#define IXGBE_RXD_STAT_TSIP	0x08000 /* Time Stamp in packet buffer */
-#define IXGBE_RXD_STAT_TS	0x10000 /* Time Stamp */
-#define IXGBE_RXD_STAT_SECP	0x20000 /* Security Processing */
-#define IXGBE_RXD_STAT_LB	0x40000 /* Loopback Status */
-#define IXGBE_RXD_STAT_ACK	0x8000 /* ACK Packet indication */
-#define IXGBE_RXD_ERR_CE	0x01 /* CRC Error */
-#define IXGBE_RXD_ERR_LE	0x02 /* Length Error */
-#define IXGBE_RXD_ERR_PE	0x08 /* Packet Error */
-#define IXGBE_RXD_ERR_OSE	0x10 /* Oversize Error */
-#define IXGBE_RXD_ERR_USE	0x20 /* Undersize Error */
-#define IXGBE_RXD_ERR_TCPE	0x40 /* TCP/UDP Checksum Error */
-#define IXGBE_RXD_ERR_IPE	0x80 /* IP Checksum Error */
-#define IXGBE_RXDADV_ERR_MASK		0xfff00000 /* RDESC.ERRORS mask */
-#define IXGBE_RXDADV_ERR_SHIFT		20 /* RDESC.ERRORS shift */
-#define IXGBE_RXDADV_ERR_OUTERIPER	0x04000000 /* CRC IP Header error */
-#define IXGBE_RXDADV_ERR_RXE		0x20000000 /* Any MAC Error */
-#define IXGBE_RXDADV_ERR_FCEOFE		0x80000000 /* FCoEFe/IPE */
-#define IXGBE_RXDADV_ERR_FCERR		0x00700000 /* FCERR/FDIRERR */
-#define IXGBE_RXDADV_ERR_FDIR_LEN	0x00100000 /* FDIR Length error */
-#define IXGBE_RXDADV_ERR_FDIR_DROP	0x00200000 /* FDIR Drop error */
-#define IXGBE_RXDADV_ERR_FDIR_COLL	0x00400000 /* FDIR Collision error */
-#define IXGBE_RXDADV_ERR_HBO	0x00800000 /* Header Buffer Overflow */
-#define IXGBE_RXDADV_ERR_CE	0x01000000 /* CRC Error */
-#define IXGBE_RXDADV_ERR_LE	0x02000000 /* Length Error */
-#define IXGBE_RXDADV_ERR_PE	0x08000000 /* Packet Error */
-#define IXGBE_RXDADV_ERR_OSE	0x10000000 /* Oversize Error */
-#define IXGBE_RXDADV_ERR_USE	0x20000000 /* Undersize Error */
-#define IXGBE_RXDADV_ERR_TCPE	0x40000000 /* TCP/UDP Checksum Error */
-#define IXGBE_RXDADV_ERR_IPE	0x80000000 /* IP Checksum Error */
-#define IXGBE_RXD_VLAN_ID_MASK	0x0FFF  /* VLAN ID is in lower 12 bits */
-#define IXGBE_RXD_PRI_MASK	0xE000  /* Priority is in upper 3 bits */
-#define IXGBE_RXD_PRI_SHIFT	13
-#define IXGBE_RXD_CFI_MASK	0x1000  /* CFI is bit 12 */
-#define IXGBE_RXD_CFI_SHIFT	12
-
-#define IXGBE_RXDADV_STAT_DD		IXGBE_RXD_STAT_DD  /* Done */
-#define IXGBE_RXDADV_STAT_EOP		IXGBE_RXD_STAT_EOP /* End of Packet */
-#define IXGBE_RXDADV_STAT_FLM		IXGBE_RXD_STAT_FLM /* FDir Match */
-#define IXGBE_RXDADV_STAT_VP		IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
-#define IXGBE_RXDADV_STAT_MASK		0x000fffff /* Stat/NEXTP: bit 0-19 */
-#define IXGBE_RXDADV_STAT_FCEOFS	0x00000040 /* FCoE EOF/SOF Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT	0x00000030 /* FCoE Pkt Stat */
-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH	0x00000000 /* 00: No Ctxt Match */
-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP	0x00000010 /* 01: Ctxt w/o DDP */
-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP	0x00000020 /* 10: Recv. FCP_RSP */
-#define IXGBE_RXDADV_STAT_FCSTAT_DDP	0x00000030 /* 11: Ctxt w/ DDP */
-#define IXGBE_RXDADV_STAT_TS		0x00010000 /* IEEE1588 Time Stamp */
-#define IXGBE_RXDADV_STAT_TSIP		0x00008000 /* Time Stamp in packet buffer */
-
-/* PSRTYPE bit definitions */
-#define IXGBE_PSRTYPE_TCPHDR	0x00000010
-#define IXGBE_PSRTYPE_UDPHDR	0x00000020
-#define IXGBE_PSRTYPE_IPV4HDR	0x00000100
-#define IXGBE_PSRTYPE_IPV6HDR	0x00000200
-#define IXGBE_PSRTYPE_L2HDR	0x00001000
-
-/* SRRCTL bit definitions */
-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT	10 /* packet buffer size in 1 KB units */
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2 /* 64-byte resolution (>> 6)
-					   * + at bit 8 offset (<< 8)
-					   *  = (<< 2)
-					   */
-#define IXGBE_SRRCTL_RDMTS_SHIFT	22
-#define IXGBE_SRRCTL_RDMTS_MASK		0x01C00000
-#define IXGBE_SRRCTL_DROP_EN		0x10000000
-#define IXGBE_SRRCTL_BSIZEPKT_MASK	0x0000007F
-#define IXGBE_SRRCTL_BSIZEHDR_MASK	0x00003F00
-#define IXGBE_SRRCTL_DESCTYPE_LEGACY	0x00000000
-#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT	0x04000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define IXGBE_SRRCTL_DESCTYPE_MASK	0x0E000000
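
/*
 * Worked sketch of an SRRCTL value for a 2 KB packet buffer with a 256 byte
 * header buffer in advanced one-buffer mode.  BSIZEPKT is expressed in 1 KB
 * units and BSIZEHDR in 64-byte units placed at bit 8, which is the "<< 2"
 * noted in the BSIZEHDRSIZE comment above.
 */
static u32 example_srrctl_2k_onebuf(void)
{
	u32 srrctl = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;	/* = 2 (KB) */

	srrctl |= (256 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  IXGBE_SRRCTL_BSIZEHDR_MASK;			/* = 0x400 */
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	return srrctl;
}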
-
-#define IXGBE_RXDPS_HDRSTAT_HDRSP	0x00008000
-#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK	0x000003FF
-
-#define IXGBE_RXDADV_RSSTYPE_MASK	0x0000000F
-#define IXGBE_RXDADV_PKTTYPE_MASK	0x0000FFF0
-#define IXGBE_RXDADV_PKTTYPE_MASK_EX	0x0001FFF0
-#define IXGBE_RXDADV_HDRBUFLEN_MASK	0x00007FE0
-#define IXGBE_RXDADV_RSCCNT_MASK	0x001E0000
-#define IXGBE_RXDADV_RSCCNT_SHIFT	17
-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT	5
-#define IXGBE_RXDADV_SPLITHEADER_EN	0x00001000
-#define IXGBE_RXDADV_SPH		0x8000
-
-/* RSS Hash results */
-#define IXGBE_RXDADV_RSSTYPE_NONE	0x00000000
-#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP	0x00000001
-#define IXGBE_RXDADV_RSSTYPE_IPV4	0x00000002
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP	0x00000003
-#define IXGBE_RXDADV_RSSTYPE_IPV6_EX	0x00000004
-#define IXGBE_RXDADV_RSSTYPE_IPV6	0x00000005
-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
-#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP	0x00000007
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP	0x00000008
-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
-
-/* RSS Packet Types as indicated in the receive descriptor. */
-#define IXGBE_RXDADV_PKTTYPE_NONE	0x00000000
-#define IXGBE_RXDADV_PKTTYPE_IPV4	0x00000010 /* IPv4 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV4_EX	0x00000020 /* IPv4 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_IPV6	0x00000040 /* IPv6 hdr present */
-#define IXGBE_RXDADV_PKTTYPE_IPV6_EX	0x00000080 /* IPv6 hdr + extensions */
-#define IXGBE_RXDADV_PKTTYPE_TCP	0x00000100 /* TCP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_UDP	0x00000200 /* UDP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_SCTP	0x00000400 /* SCTP hdr present */
-#define IXGBE_RXDADV_PKTTYPE_NFS	0x00000800 /* NFS hdr present */
-#define IXGBE_RXDADV_PKTTYPE_VXLAN	0x00000800 /* VXLAN hdr present */
-#define IXGBE_RXDADV_PKTTYPE_TUNNEL	0x00010000 /* Tunnel type */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP	0x00001000 /* IPSec ESP */
-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH	0x00002000 /* IPSec AH */
-#define IXGBE_RXDADV_PKTTYPE_LINKSEC	0x00004000 /* LinkSec Encap */
-#define IXGBE_RXDADV_PKTTYPE_ETQF	0x00008000 /* PKTTYPE is ETQF index */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK	0x00000070 /* ETQF has 8 indices */
-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT	4 /* Right-shift 4 bits */
-
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP		0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH	0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR	0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK	0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG	0x18000000
-
-/* Masks to determine if packets should be dropped due to frame errors */
-#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
-				IXGBE_RXD_ERR_CE | \
-				IXGBE_RXD_ERR_LE | \
-				IXGBE_RXD_ERR_PE | \
-				IXGBE_RXD_ERR_OSE | \
-				IXGBE_RXD_ERR_USE)
-
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
-				IXGBE_RXDADV_ERR_CE | \
-				IXGBE_RXDADV_ERR_LE | \
-				IXGBE_RXDADV_ERR_PE | \
-				IXGBE_RXDADV_ERR_OSE | \
-				IXGBE_RXDADV_ERR_USE)
-
-#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599	IXGBE_RXDADV_ERR_RXE
-
-/* Multicast bit mask */
-#define IXGBE_MCSTCTRL_MFE	0x4
-
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE	8
-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE	8
-#define IXGBE_REQ_TX_BUFFER_GRANULARITY		1024
-
-/* Vlan-specific macros */
-#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK	0x0FFF /* VLAN ID in lower 12 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_MASK	0xE000 /* Priority in upper 3 bits */
-#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT	0x000D /* Priority in upper 3 of 16 */
-#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT	IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
-
-/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number)	(vf_number >> 4)
-#define IXGBE_MBVFICR(_i)		(0x00710 + ((_i) * 4))
-#define IXGBE_VFLRE(_i)			(((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i)		 (0x00700 + ((_i) * 4))
-/* Translated register #defines */
-#define IXGBE_PVFCTRL(P)	(0x00300 + (4 * (P)))
-#define IXGBE_PVFSTATUS(P)	(0x00008 + (0 * (P)))
-#define IXGBE_PVFLINKS(P)	(0x042A4 + (0 * (P)))
-#define IXGBE_PVFRTIMER(P)	(0x00048 + (0 * (P)))
-#define IXGBE_PVFMAILBOX(P)	(0x04C00 + (4 * (P)))
-#define IXGBE_PVFRXMEMWRAP(P)	(0x03190 + (0 * (P)))
-#define IXGBE_PVTEICR(P)	(0x00B00 + (4 * (P)))
-#define IXGBE_PVTEICS(P)	(0x00C00 + (4 * (P)))
-#define IXGBE_PVTEIMS(P)	(0x00D00 + (4 * (P)))
-#define IXGBE_PVTEIMC(P)	(0x00E00 + (4 * (P)))
-#define IXGBE_PVTEIAC(P)	(0x00F00 + (4 * (P)))
-#define IXGBE_PVTEIAM(P)	(0x04D00 + (4 * (P)))
-#define IXGBE_PVTEITR(P)	(((P) < 24) ? (0x00820 + ((P) * 4)) : \
-				 (0x012300 + (((P) - 24) * 4)))
-#define IXGBE_PVTIVAR(P)	(0x12500 + (4 * (P)))
-#define IXGBE_PVTIVAR_MISC(P)	(0x04E00 + (4 * (P)))
-#define IXGBE_PVTRSCINT(P)	(0x12000 + (4 * (P)))
-#define IXGBE_VFPBACL(P)	(0x110C8 + (4 * (P)))
-#define IXGBE_PVFRDBAL(P)	((P < 64) ? (0x01000 + (0x40 * (P))) \
-				 : (0x0D000 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDBAH(P)	((P < 64) ? (0x01004 + (0x40 * (P))) \
-				 : (0x0D004 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDLEN(P)	((P < 64) ? (0x01008 + (0x40 * (P))) \
-				 : (0x0D008 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDH(P)		((P < 64) ? (0x01010 + (0x40 * (P))) \
-				 : (0x0D010 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRDT(P)		((P < 64) ? (0x01018 + (0x40 * (P))) \
-				 : (0x0D018 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFRXDCTL(P)	((P < 64) ? (0x01028 + (0x40 * (P))) \
-				 : (0x0D028 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFSRRCTL(P)	((P < 64) ? (0x01014 + (0x40 * (P))) \
-				 : (0x0D014 + (0x40 * ((P) - 64))))
-#define IXGBE_PVFPSRTYPE(P)	(0x0EA00 + (4 * (P)))
-#define IXGBE_PVFTDBAL(P)	(0x06000 + (0x40 * (P)))
-#define IXGBE_PVFTDBAH(P)	(0x06004 + (0x40 * (P)))
-#define IXGBE_PVFTTDLEN(P)	(0x06008 + (0x40 * (P)))
-#define IXGBE_PVFTDH(P)		(0x06010 + (0x40 * (P)))
-#define IXGBE_PVFTDT(P)		(0x06018 + (0x40 * (P)))
-#define IXGBE_PVFTXDCTL(P)	(0x06028 + (0x40 * (P)))
-#define IXGBE_PVFTDWBAL(P)	(0x06038 + (0x40 * (P)))
-#define IXGBE_PVFTDWBAH(P)	(0x0603C + (0x40 * (P)))
-#define IXGBE_PVFDCA_RXCTRL(P)	(((P) < 64) ? (0x0100C + (0x40 * (P))) \
-				 : (0x0D00C + (0x40 * ((P) - 64))))
-#define IXGBE_PVFDCA_TXCTRL(P)	(0x0600C + (0x40 * (P)))
-#define IXGBE_PVFGPRC(x)	(0x0101C + (0x40 * (x)))
-#define IXGBE_PVFGPTC(x)	(0x08300 + (0x04 * (x)))
-#define IXGBE_PVFGORC_LSB(x)	(0x01020 + (0x40 * (x)))
-#define IXGBE_PVFGORC_MSB(x)	(0x0D020 + (0x40 * (x)))
-#define IXGBE_PVFGOTC_LSB(x)	(0x08400 + (0x08 * (x)))
-#define IXGBE_PVFGOTC_MSB(x)	(0x08404 + (0x08 * (x)))
-#define IXGBE_PVFMPRC(x)	(0x0D01C + (0x40 * (x)))
-
-#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
-		(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
-#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
-		(IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
-
-#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
-		(IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
-#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
-		(IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
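
/*
 * Sketch of addressing one VF transmit queue through the translated
 * registers above: with 2 queues per pool (64-pool mode), VF 5's second
 * queue is absolute queue 2*5 + 1 = 11, so its tail register is
 * IXGBE_PVFTDT(11).
 */
static u32 example_vf_tdt_offset(u32 q_per_pool, u32 vf, u32 vf_q)
{
	/* example_vf_tdt_offset(2, 5, 1) == IXGBE_PVFTDT(11) == 0x062D8 */
	return IXGBE_PVFTDTn(q_per_pool, vf, vf_q);
}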
-
-/* Little Endian defines */
-#ifndef __le16
-#define __le16  u16
-#endif
-#ifndef __le32
-#define __le32  u32
-#endif
-#ifndef __le64
-#define __le64  u64
-
-#endif
-#ifndef __be16
-/* Big Endian defines */
-#define __be16  u16
-#define __be32  u32
-#define __be64  u64
-
-#endif
-enum ixgbe_fdir_pballoc_type {
-	IXGBE_FDIR_PBALLOC_NONE = 0,
-	IXGBE_FDIR_PBALLOC_64K  = 1,
-	IXGBE_FDIR_PBALLOC_128K = 2,
-	IXGBE_FDIR_PBALLOC_256K = 3,
-};
-
-/* Flow Director register values */
-#define IXGBE_FDIRCTRL_PBALLOC_64K		0x00000001
-#define IXGBE_FDIRCTRL_PBALLOC_128K		0x00000002
-#define IXGBE_FDIRCTRL_PBALLOC_256K		0x00000003
-#define IXGBE_FDIRCTRL_INIT_DONE		0x00000008
-#define IXGBE_FDIRCTRL_PERFECT_MATCH		0x00000010
-#define IXGBE_FDIRCTRL_REPORT_STATUS		0x00000020
-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS	0x00000080
-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT		8
-#define IXGBE_FDIRCTRL_FLEX_SHIFT		16
-#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT		21
-#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN	0x0001 /* bit 23:21, 001b */
-#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD		0x0002 /* bit 23:21, 010b */
-#define IXGBE_FDIRCTRL_SEARCHLIM		0x00800000
-#define IXGBE_FDIRCTRL_FILTERMODE_MASK		0x00E00000
-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT		24
-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK		0xF0000000
-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT	28
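
/*
 * Sketch of composing FDIRCTRL for perfect-match Flow Director with a 64 KB
 * filter table, using the fields above.  The flex-byte offset and threshold
 * values are illustrative only, not recommended settings.
 */
static u32 example_fdirctrl_perfect(u8 drop_queue)
{
	u32 fdirctrl = IXGBE_FDIRCTRL_PBALLOC_64K |
		       IXGBE_FDIRCTRL_PERFECT_MATCH |
		       IXGBE_FDIRCTRL_REPORT_STATUS |
		       ((u32)drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);

	fdirctrl |= 0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT;
	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
	fdirctrl |= 0x4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
	return fdirctrl;
}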
-
-#define IXGBE_FDIRTCPM_DPORTM_SHIFT		16
-#define IXGBE_FDIRUDPM_DPORTM_SHIFT		16
-#define IXGBE_FDIRIP6M_DIPM_SHIFT		16
-#define IXGBE_FDIRM_VLANID			0x00000001
-#define IXGBE_FDIRM_VLANP			0x00000002
-#define IXGBE_FDIRM_POOL			0x00000004
-#define IXGBE_FDIRM_L4P				0x00000008
-#define IXGBE_FDIRM_FLEX			0x00000010
-#define IXGBE_FDIRM_DIPv6			0x00000020
-#define IXGBE_FDIRM_L3P				0x00000040
-
-#define IXGBE_FDIRIP6M_INNER_MAC	0x03F0 /* bit 9:4 */
-#define IXGBE_FDIRIP6M_TUNNEL_TYPE	0x0800 /* bit 11 */
-#define IXGBE_FDIRIP6M_TNI_VNI		0xF000 /* bit 15:12 */
-#define IXGBE_FDIRIP6M_TNI_VNI_24	0x1000 /* bit 12 */
-#define IXGBE_FDIRIP6M_ALWAYS_MASK	0x040F /* bit 10, 3:0 */
-
-#define IXGBE_FDIRFREE_FREE_MASK		0xFFFF
-#define IXGBE_FDIRFREE_FREE_SHIFT		0
-#define IXGBE_FDIRFREE_COLL_MASK		0x7FFF0000
-#define IXGBE_FDIRFREE_COLL_SHIFT		16
-#define IXGBE_FDIRLEN_MAXLEN_MASK		0x3F
-#define IXGBE_FDIRLEN_MAXLEN_SHIFT		0
-#define IXGBE_FDIRLEN_MAXHASH_MASK		0x7FFF0000
-#define IXGBE_FDIRLEN_MAXHASH_SHIFT		16
-#define IXGBE_FDIRUSTAT_ADD_MASK		0xFFFF
-#define IXGBE_FDIRUSTAT_ADD_SHIFT		0
-#define IXGBE_FDIRUSTAT_REMOVE_MASK		0xFFFF0000
-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT		16
-#define IXGBE_FDIRFSTAT_FADD_MASK		0x00FF
-#define IXGBE_FDIRFSTAT_FADD_SHIFT		0
-#define IXGBE_FDIRFSTAT_FREMOVE_MASK		0xFF00
-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT		8
-#define IXGBE_FDIRPORT_DESTINATION_SHIFT	16
-#define IXGBE_FDIRVLAN_FLEX_SHIFT		16
-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT	15
-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT	16
-
-#define IXGBE_FDIRCMD_CMD_MASK			0x00000003
-#define IXGBE_FDIRCMD_CMD_ADD_FLOW		0x00000001
-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW		0x00000002
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT	0x00000003
-#define IXGBE_FDIRCMD_FILTER_VALID		0x00000004
-#define IXGBE_FDIRCMD_FILTER_UPDATE		0x00000008
-#define IXGBE_FDIRCMD_IPv6DMATCH		0x00000010
-#define IXGBE_FDIRCMD_L4TYPE_UDP		0x00000020
-#define IXGBE_FDIRCMD_L4TYPE_TCP		0x00000040
-#define IXGBE_FDIRCMD_L4TYPE_SCTP		0x00000060
-#define IXGBE_FDIRCMD_IPV6			0x00000080
-#define IXGBE_FDIRCMD_CLEARHT			0x00000100
-#define IXGBE_FDIRCMD_DROP			0x00000200
-#define IXGBE_FDIRCMD_INT			0x00000400
-#define IXGBE_FDIRCMD_LAST			0x00000800
-#define IXGBE_FDIRCMD_COLLISION			0x00001000
-#define IXGBE_FDIRCMD_QUEUE_EN			0x00008000
-#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT		5
-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT		16
-#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT	23
-#define IXGBE_FDIRCMD_VT_POOL_SHIFT		24
-#define IXGBE_FDIR_INIT_DONE_POLL		10
-#define IXGBE_FDIRCMD_CMD_POLL			10
-#define IXGBE_FDIRCMD_TUNNEL_FILTER		0x00800000
-#define IXGBE_FDIR_DROP_QUEUE			127
-
-
-/* Manageability Host Interface defines */
-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH	1792 /* Num of bytes in range */
-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH	448 /* Num of dwords in range */
-#define IXGBE_HI_COMMAND_TIMEOUT	500 /* Process HI command limit */
-#define IXGBE_HI_FLASH_ERASE_TIMEOUT	1000 /* Process Erase command limit */
-#define IXGBE_HI_FLASH_UPDATE_TIMEOUT	5000 /* Process Update command limit */
-#define IXGBE_HI_FLASH_APPLY_TIMEOUT	0 /* Process Apply command limit */
-
-/* CEM Support */
-#define FW_CEM_HDR_LEN			0x4
-#define FW_CEM_CMD_DRIVER_INFO		0xDD
-#define FW_CEM_CMD_DRIVER_INFO_LEN	0x5
-#define FW_CEM_CMD_RESERVED		0x0
-#define FW_CEM_UNUSED_VER		0x0
-#define FW_CEM_MAX_RETRIES		3
-#define FW_CEM_RESP_STATUS_SUCCESS	0x1
-#define FW_READ_SHADOW_RAM_CMD		0x31
-#define FW_READ_SHADOW_RAM_LEN		0x6
-#define FW_WRITE_SHADOW_RAM_CMD		0x33
-#define FW_WRITE_SHADOW_RAM_LEN		0xA /* 8 plus 1 WORD to write */
-#define FW_SHADOW_RAM_DUMP_CMD		0x36
-#define FW_SHADOW_RAM_DUMP_LEN		0
-#define FW_DEFAULT_CHECKSUM		0xFF /* checksum always 0xFF */
-#define FW_NVM_DATA_OFFSET		3
-#define FW_MAX_READ_BUFFER_SIZE		1024
-#define FW_DISABLE_RXEN_CMD		0xDE
-#define FW_DISABLE_RXEN_LEN		0x1
-/* Host Interface Command Structures */
-
-struct ixgbe_hic_hdr {
-	u8 cmd;
-	u8 buf_len;
-	union {
-		u8 cmd_resv;
-		u8 ret_status;
-	} cmd_or_resp;
-	u8 checksum;
-};
-
-struct ixgbe_hic_hdr2_req {
-	u8 cmd;
-	u8 buf_lenh;
-	u8 buf_lenl;
-	u8 checksum;
-};
-
-struct ixgbe_hic_hdr2_rsp {
-	u8 cmd;
-	u8 buf_lenl;
-	u8 buf_lenh_status;	/* 7-5: high bits of buf_len, 4-0: status */
-	u8 checksum;
-};
-
-union ixgbe_hic_hdr2 {
-	struct ixgbe_hic_hdr2_req req;
-	struct ixgbe_hic_hdr2_rsp rsp;
-};
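
/*
 * Sketch of checking a type-2 host interface response using the split
 * length/status byte documented in the struct above: the low five bits
 * carry the firmware return status.
 */
static int example_hic2_resp_ok(const struct ixgbe_hic_hdr2_rsp *rsp)
{
	return (rsp->buf_lenh_status & 0x1F) == FW_CEM_RESP_STATUS_SUCCESS;
}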
-
-struct ixgbe_hic_drv_info {
-	struct ixgbe_hic_hdr hdr;
-	u8 port_num;
-	u8 ver_sub;
-	u8 ver_build;
-	u8 ver_min;
-	u8 ver_maj;
-	u8 pad; /* end spacing to ensure length is mult. of dword */
-	u16 pad2; /* end spacing to ensure length is mult. of dword */
-};
-
-/* These need to be dword aligned */
-struct ixgbe_hic_read_shadow_ram {
-	union ixgbe_hic_hdr2 hdr;
-	u32 address;
-	u16 length;
-	u16 pad2;
-	u16 data;
-	u16 pad3;
-};
-
-struct ixgbe_hic_write_shadow_ram {
-	union ixgbe_hic_hdr2 hdr;
-	u32 address;
-	u16 length;
-	u16 pad2;
-	u16 data;
-	u16 pad3;
-};
-
-struct ixgbe_hic_disable_rxen {
-	struct ixgbe_hic_hdr hdr;
-	u8  port_number;
-	u8  pad2;
-	u16 pad3;
-};
-
-
-/* Transmit Descriptor - Legacy */
-struct ixgbe_legacy_tx_desc {
-	u64 buffer_addr; /* Address of the descriptor's data buffer */
-	union {
-		__le32 data;
-		struct {
-			__le16 length; /* Data buffer length */
-			u8 cso; /* Checksum offset */
-			u8 cmd; /* Descriptor control */
-		} flags;
-	} lower;
-	union {
-		__le32 data;
-		struct {
-			u8 status; /* Descriptor status */
-			u8 css; /* Checksum start */
-			__le16 vlan;
-		} fields;
-	} upper;
-};
-
-/* Transmit Descriptor - Advanced */
-union ixgbe_adv_tx_desc {
-	struct {
-		__le64 buffer_addr; /* Address of descriptor's data buf */
-		__le32 cmd_type_len;
-		__le32 olinfo_status;
-	} read;
-	struct {
-		__le64 rsvd; /* Reserved */
-		__le32 nxtseq_seed;
-		__le32 status;
-	} wb;
-};
-
-/* Receive Descriptor - Legacy */
-struct ixgbe_legacy_rx_desc {
-	__le64 buffer_addr; /* Address of the descriptor's data buffer */
-	__le16 length; /* Length of data DMAed into data buffer */
-	__le16 csum; /* Packet checksum */
-	u8 status;   /* Descriptor status */
-	u8 errors;   /* Descriptor Errors */
-	__le16 vlan;
-};
-
-/* Receive Descriptor - Advanced */
-union ixgbe_adv_rx_desc {
-	struct {
-		__le64 pkt_addr; /* Packet buffer address */
-		__le64 hdr_addr; /* Header buffer address */
-	} read;
-	struct {
-		struct {
-			union {
-				__le32 data;
-				struct {
-					__le16 pkt_info; /* RSS, Pkt type */
-					__le16 hdr_info; /* Splithdr, hdrlen */
-				} hs_rss;
-			} lo_dword;
-			union {
-				__le32 rss; /* RSS Hash */
-				struct {
-					__le16 ip_id; /* IP id */
-					__le16 csum; /* Packet Checksum */
-				} csum_ip;
-			} hi_dword;
-		} lower;
-		struct {
-			__le32 status_error; /* ext status/error */
-			__le16 length; /* Packet length */
-			__le16 vlan; /* VLAN tag */
-		} upper;
-	} wb;  /* writeback */
-};
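
/*
 * Sketch of how an Rx cleanup loop reads an advanced descriptor back: the
 * hardware reports completion through the write-back half, so DD, the frame
 * error bits and EOP are tested in wb.upper.status_error before the length
 * is used.  rte_le_to_cpu_32()/rte_le_to_cpu_16() from DPDK's
 * rte_byteorder.h are assumed for the endian conversion.
 */
static int example_rx_desc_done(volatile union ixgbe_adv_rx_desc *rxd,
				u16 *pkt_len)
{
	u32 staterr = rte_le_to_cpu_32(rxd->wb.upper.status_error);

	if (!(staterr & IXGBE_RXDADV_STAT_DD))
		return 0;				/* not written back yet */
	if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
		return -1;				/* drop bad frames */
	*pkt_len = rte_le_to_cpu_16(rxd->wb.upper.length);
	return (staterr & IXGBE_RXDADV_STAT_EOP) ? 1 : 0;
}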
-
-/* Context descriptors */
-struct ixgbe_adv_tx_context_desc {
-	__le32 vlan_macip_lens;
-	__le32 seqnum_seed;
-	__le32 type_tucmd_mlhl;
-	__le32 mss_l4len_idx;
-};
-
-/* Adv Transmit Descriptor Config Masks */
-#define IXGBE_ADVTXD_DTALEN_MASK	0x0000FFFF /* Data buf length(bytes) */
-#define IXGBE_ADVTXD_MAC_LINKSEC	0x00040000 /* Insert LinkSec */
-#define IXGBE_ADVTXD_MAC_TSTAMP		0x00080000 /* IEEE1588 time stamp */
-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK	0x000001FF /* IPSec ESP length */
-#define IXGBE_ADVTXD_DTYP_MASK		0x00F00000 /* DTYP mask */
-#define IXGBE_ADVTXD_DTYP_CTXT		0x00200000 /* Adv Context Desc */
-#define IXGBE_ADVTXD_DTYP_DATA		0x00300000 /* Adv Data Descriptor */
-#define IXGBE_ADVTXD_DCMD_EOP		IXGBE_TXD_CMD_EOP  /* End of Packet */
-#define IXGBE_ADVTXD_DCMD_IFCS		IXGBE_TXD_CMD_IFCS /* Insert FCS */
-#define IXGBE_ADVTXD_DCMD_RS		IXGBE_TXD_CMD_RS /* Report Status */
-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI	0x10000000 /* DDP hdr type or iSCSI */
-#define IXGBE_ADVTXD_DCMD_DEXT		IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */
-#define IXGBE_ADVTXD_DCMD_VLE		IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
-#define IXGBE_ADVTXD_DCMD_TSE		0x80000000 /* TCP Seg enable */
-#define IXGBE_ADVTXD_STAT_DD		IXGBE_TXD_STAT_DD  /* Descriptor Done */
-#define IXGBE_ADVTXD_STAT_SN_CRC	0x00000002 /* NXTSEQ/SEED pres in WB */
-#define IXGBE_ADVTXD_STAT_RSV		0x0000000C /* STA Reserved */
-#define IXGBE_ADVTXD_IDX_SHIFT		4 /* Adv desc Index shift */
-#define IXGBE_ADVTXD_CC			0x00000080 /* Check Context */
-#define IXGBE_ADVTXD_POPTS_SHIFT	8  /* Adv desc POPTS shift */
-#define IXGBE_ADVTXD_POPTS_IXSM		(IXGBE_TXD_POPTS_IXSM << \
-					 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_TXSM		(IXGBE_TXD_POPTS_TXSM << \
-					 IXGBE_ADVTXD_POPTS_SHIFT)
-#define IXGBE_ADVTXD_POPTS_ISCO_1ST	0x00000000 /* 1st TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_MDL	0x00000800 /* Middle TSO of iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_LAST	0x00001000 /* Last TSO of iSCSI PDU */
-/* 1st&Last TSO-full iSCSI PDU */
-#define IXGBE_ADVTXD_POPTS_ISCO_FULL	0x00001800
-#define IXGBE_ADVTXD_POPTS_RSV		0x00002000 /* POPTS Reserved */
-#define IXGBE_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */
-#define IXGBE_ADVTXD_MACLEN_SHIFT	9  /* Adv ctxt desc mac len shift */
-#define IXGBE_ADVTXD_VLAN_SHIFT		16  /* Adv ctxt vlan tag shift */
-#define IXGBE_ADVTXD_TUCMD_IPV4		0x00000400 /* IP Packet Type: 1=IPv4 */
-#define IXGBE_ADVTXD_TUCMD_IPV6		0x00000000 /* IP Packet Type: 0=IPv6 */
-#define IXGBE_ADVTXD_TUCMD_L4T_UDP	0x00000000 /* L4 Packet TYPE of UDP */
-#define IXGBE_ADVTXD_TUCMD_L4T_TCP	0x00000800 /* L4 Packet TYPE of TCP */
-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP	0x00001000 /* L4 Packet TYPE of SCTP */
-#define IXGBE_ADVTXD_TUCMD_MKRREQ	0x00002000 /* req Markers and CRC */
-#define IXGBE_ADVTXD_POPTS_IPSEC	0x00000400 /* IPSec offload request */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
-#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
-#define IXGBE_ADVTXT_TUCMD_FCOE		0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK	(0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF		((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC	((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE		((1 << 4) << 10) /* Orientation End */
-#define IXGBE_ADVTXD_FCOEF_ORIS		((1 << 5) << 10) /* Orientation Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N	(0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T	(0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI	(0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A	(0x3 << 10) /* 11: EOFa */
-#define IXGBE_ADVTXD_L4LEN_SHIFT	8  /* Adv ctxt L4LEN shift */
-#define IXGBE_ADVTXD_MSS_SHIFT		16  /* Adv ctxt MSS shift */
-
-#define IXGBE_ADVTXD_OUTER_IPLEN	16 /* Adv ctxt OUTERIPLEN shift */
-#define IXGBE_ADVTXD_TUNNEL_LEN	24 /* Adv ctxt TUNNELLEN shift */
-#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT	16 /* Adv Tx Desc Tunnel Type shift */
-#define IXGBE_ADVTXD_OUTERIPCS_SHIFT	17 /* Adv Tx Desc OUTERIPCS Shift */
-#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE	1  /* Adv Tx Desc Tunnel Type NVGRE */
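
/*
 * Sketch of filling one advanced data descriptor for a plain single-segment
 * frame using the masks above: DTYP_DATA/DCMD_DEXT select the advanced
 * format, DCMD_IFCS requests CRC insertion, EOP/RS close the packet and ask
 * for a write-back, and the payload length is reported through
 * olinfo_status at PAYLEN_SHIFT.  rte_cpu_to_le_64()/rte_cpu_to_le_32()
 * from DPDK's rte_byteorder.h are assumed for the endian conversion.
 */
static void example_fill_tx_desc(volatile union ixgbe_adv_tx_desc *txd,
				 u64 buf_dma, u16 data_len)
{
	u32 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP |
			   IXGBE_TXD_CMD_RS | data_len;

	txd->read.buffer_addr = rte_cpu_to_le_64(buf_dma);
	txd->read.cmd_type_len = rte_cpu_to_le_32(cmd_type_len);
	txd->read.olinfo_status =
		rte_cpu_to_le_32((u32)data_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
}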
-
-/* Autonegotiation advertised speeds */
-typedef u32 ixgbe_autoneg_advertised;
-/* Link speed */
-typedef u32 ixgbe_link_speed;
-#define IXGBE_LINK_SPEED_UNKNOWN	0
-#define IXGBE_LINK_SPEED_100_FULL	0x0008
-#define IXGBE_LINK_SPEED_1GB_FULL	0x0020
-#define IXGBE_LINK_SPEED_2_5GB_FULL	0x0400
-#define IXGBE_LINK_SPEED_5GB_FULL	0x0800
-#define IXGBE_LINK_SPEED_10GB_FULL	0x0080
-#define IXGBE_LINK_SPEED_82598_AUTONEG	(IXGBE_LINK_SPEED_1GB_FULL | \
-					 IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_LINK_SPEED_82599_AUTONEG	(IXGBE_LINK_SPEED_100_FULL | \
-					 IXGBE_LINK_SPEED_1GB_FULL | \
-					 IXGBE_LINK_SPEED_10GB_FULL)
-
-/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN		0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T		0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T		0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX		0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU	0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR		0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM	0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR		0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4	0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4	0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX	0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX	0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR		0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI	0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA	0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX	0x4000
-
-/* Flow Control Data Sheet defined values
- * Calculation and defines taken from 802.1bb Annex O
- */
-
-/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT)		((BT + (8 * 1024 - 1)) / (8 * 1024))
-#define IXGBE_B2BT(BT)		(BT * 8)
-
-/* Calculate Delay to respond to PFC */
-#define IXGBE_PFC_D	672
-
-/* Calculate Cable Delay */
-#define IXGBE_CABLE_DC	5556 /* Delay Copper */
-#define IXGBE_CABLE_DO	5000 /* Delay Optical */
-
-/* Calculate Interface Delay X540 */
-#define IXGBE_PHY_DC	25600 /* Delay 10G BASET */
-#define IXGBE_MAC_DC	8192  /* Delay Copper XAUI interface */
-#define IXGBE_XAUI_DC	(2 * 2048) /* Delay Copper Phy */
-
-#define IXGBE_ID_X540	(IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
-
-/* Calculate Interface Delay 82598, 82599 */
-#define IXGBE_PHY_D	12800
-#define IXGBE_MAC_D	4096
-#define IXGBE_XAUI_D	(2 * 1024)
-
-#define IXGBE_ID	(IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
-
-/* Calculate Delay incurred from higher layer */
-#define IXGBE_HD	6144
-
-/* Calculate PCI Bus delay for low thresholds */
-#define IXGBE_PCI_DELAY	10000
-
-/* Calculate X540 delay value in bit times */
-#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
-			((36 * \
-			  (IXGBE_B2BT(_max_frame_link) + \
-			   IXGBE_PFC_D + \
-			   (2 * IXGBE_CABLE_DC) + \
-			   (2 * IXGBE_ID_X540) + \
-			   IXGBE_HD) / 25 + 1) + \
-			 2 * IXGBE_B2BT(_max_frame_tc))
-
-/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
-			((36 * \
-			  (IXGBE_B2BT(_max_frame_link) + \
-			   IXGBE_PFC_D + \
-			   (2 * IXGBE_CABLE_DC) + \
-			   (2 * IXGBE_ID) + \
-			   IXGBE_HD) / 25 + 1) + \
-			 2 * IXGBE_B2BT(_max_frame_tc))
-
-/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(_max_frame_tc) \
-			(2 * IXGBE_B2BT(_max_frame_tc) + \
-			(36 * IXGBE_PCI_DELAY / 25) + 1)
-#define IXGBE_LOW_DV(_max_frame_tc) \
-			(2 * IXGBE_LOW_DV_X540(_max_frame_tc))
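
/*
 * Worked usage sketch: a flow-control high-water mark is derived from the
 * delay macros above and then rounded up to kilobytes, here for a standard
 * 1518-byte frame on both the link and the traffic class (82599/82598
 * variant).
 */
static u32 example_fc_high_water_kb(void)
{
	u32 dv_bt = IXGBE_DV(1518, 1518);	/* delay value in bit times */

	return IXGBE_BT2KB(dv_bt);		/* round up to kilobytes */
}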
-
-/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY	0x3DAD14E2
-#define IXGBE_ATR_SIGNATURE_HASH_KEY	0x174D3614
-
-/* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK		0x7fff
-#define IXGBE_ATR_L4TYPE_MASK		0x3
-#define IXGBE_ATR_L4TYPE_UDP		0x1
-#define IXGBE_ATR_L4TYPE_TCP		0x2
-#define IXGBE_ATR_L4TYPE_SCTP		0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK	0x4
-#define IXGBE_ATR_L4TYPE_TUNNEL_MASK	0x10
-enum ixgbe_atr_flow_type {
-	IXGBE_ATR_FLOW_TYPE_IPV4	= 0x0,
-	IXGBE_ATR_FLOW_TYPE_UDPV4	= 0x1,
-	IXGBE_ATR_FLOW_TYPE_TCPV4	= 0x2,
-	IXGBE_ATR_FLOW_TYPE_SCTPV4	= 0x3,
-	IXGBE_ATR_FLOW_TYPE_IPV6	= 0x4,
-	IXGBE_ATR_FLOW_TYPE_UDPV6	= 0x5,
-	IXGBE_ATR_FLOW_TYPE_TCPV6	= 0x6,
-	IXGBE_ATR_FLOW_TYPE_SCTPV6	= 0x7,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4	= 0x10,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4	= 0x11,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4	= 0x12,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4	= 0x13,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6	= 0x14,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6	= 0x15,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6	= 0x16,
-	IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6	= 0x17,
-};
-
-/* Flow Director ATR input struct. */
-union ixgbe_atr_input {
-	/*
-	 * Byte layout in order, all values with MSB first:
-	 *
-	 * vm_pool	- 1 byte
-	 * flow_type	- 1 byte
-	 * vlan_id	- 2 bytes
-	 * dst_ip	- 16 bytes
-	 * src_ip	- 16 bytes
-	 * inner_mac	- 6 bytes
-	 * tunnel_type	- 2 bytes
-	 * tni_vni	- 4 bytes
-	 * src_port	- 2 bytes
-	 * dst_port	- 2 bytes
-	 * flex_bytes	- 2 bytes
-	 * bkt_hash	- 2 bytes
-	 */
-	struct {
-		u8 vm_pool;
-		u8 flow_type;
-		__be16 vlan_id;
-		__be32 dst_ip[4];
-		__be32 src_ip[4];
-		u8 inner_mac[6];
-		__be16 tunnel_type;
-		__be32 tni_vni;
-		__be16 src_port;
-		__be16 dst_port;
-		__be16 flex_bytes;
-		__be16 bkt_hash;
-	} formatted;
-	__be32 dword_stream[14];
-};
-
-/* Flow Director compressed ATR hash input struct */
-union ixgbe_atr_hash_dword {
-	struct {
-		u8 vm_pool;
-		u8 flow_type;
-		__be16 vlan_id;
-	} formatted;
-	__be32 ip;
-	struct {
-		__be16 src;
-		__be16 dst;
-	} port;
-	__be16 flex_bytes;
-	__be32 dword;
-};
-
-
-/*
- * Unavailable: The FCoE Boot Option ROM is not present in the flash.
- * Disabled: Present; boot order is not set for any targets on the port.
- * Enabled: Present; boot order is set for at least one target on the port.
- */
-enum ixgbe_fcoe_boot_status {
-	ixgbe_fcoe_bootstatus_disabled = 0,
-	ixgbe_fcoe_bootstatus_enabled = 1,
-	ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
-};
-
-enum ixgbe_eeprom_type {
-	ixgbe_eeprom_uninitialized = 0,
-	ixgbe_eeprom_spi,
-	ixgbe_flash,
-	ixgbe_eeprom_none /* No NVM support */
-};
-
-enum ixgbe_mac_type {
-	ixgbe_mac_unknown = 0,
-	ixgbe_mac_82598EB,
-	ixgbe_mac_82599EB,
-	ixgbe_mac_82599_vf,
-	ixgbe_mac_X540,
-	ixgbe_mac_X540_vf,
-	/*
-	 * X550EM MAC type decoder:
-	 * ixgbe_mac_X550EM_x: "x" = Xeon
-	 * ixgbe_mac_X550EM_a: "a" = Atom
-	 */
-	ixgbe_mac_X550,
-	ixgbe_mac_X550EM_x,
-	ixgbe_mac_X550_vf,
-	ixgbe_mac_X550EM_x_vf,
-	ixgbe_num_macs
-};
-
-enum ixgbe_phy_type {
-	ixgbe_phy_unknown = 0,
-	ixgbe_phy_none,
-	ixgbe_phy_tn,
-	ixgbe_phy_aq,
-	ixgbe_phy_x550em_kr,
-	ixgbe_phy_x550em_kx4,
-	ixgbe_phy_x550em_ext_t,
-	ixgbe_phy_cu_unknown,
-	ixgbe_phy_qt,
-	ixgbe_phy_xaui,
-	ixgbe_phy_nl,
-	ixgbe_phy_sfp_passive_tyco,
-	ixgbe_phy_sfp_passive_unknown,
-	ixgbe_phy_sfp_active_unknown,
-	ixgbe_phy_sfp_avago,
-	ixgbe_phy_sfp_ftl,
-	ixgbe_phy_sfp_ftl_active,
-	ixgbe_phy_sfp_unknown,
-	ixgbe_phy_sfp_intel,
-	ixgbe_phy_qsfp_passive_unknown,
-	ixgbe_phy_qsfp_active_unknown,
-	ixgbe_phy_qsfp_intel,
-	ixgbe_phy_qsfp_unknown,
-	ixgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
-	ixgbe_phy_generic
-};
-
-/*
- * SFP+ module type IDs:
- *
- * ID	Module Type
- * =============
- * 0	SFP_DA_CU
- * 1	SFP_SR
- * 2	SFP_LR
- * 3	SFP_DA_CU_CORE0 - 82599-specific
- * 4	SFP_DA_CU_CORE1 - 82599-specific
- * 5	SFP_SR/LR_CORE0 - 82599-specific
- * 6	SFP_SR/LR_CORE1 - 82599-specific
- */
-enum ixgbe_sfp_type {
-	ixgbe_sfp_type_da_cu = 0,
-	ixgbe_sfp_type_sr = 1,
-	ixgbe_sfp_type_lr = 2,
-	ixgbe_sfp_type_da_cu_core0 = 3,
-	ixgbe_sfp_type_da_cu_core1 = 4,
-	ixgbe_sfp_type_srlr_core0 = 5,
-	ixgbe_sfp_type_srlr_core1 = 6,
-	ixgbe_sfp_type_da_act_lmt_core0 = 7,
-	ixgbe_sfp_type_da_act_lmt_core1 = 8,
-	ixgbe_sfp_type_1g_cu_core0 = 9,
-	ixgbe_sfp_type_1g_cu_core1 = 10,
-	ixgbe_sfp_type_1g_sx_core0 = 11,
-	ixgbe_sfp_type_1g_sx_core1 = 12,
-	ixgbe_sfp_type_1g_lx_core0 = 13,
-	ixgbe_sfp_type_1g_lx_core1 = 14,
-	ixgbe_sfp_type_not_present = 0xFFFE,
-	ixgbe_sfp_type_unknown = 0xFFFF
-};
-
-enum ixgbe_media_type {
-	ixgbe_media_type_unknown = 0,
-	ixgbe_media_type_fiber,
-	ixgbe_media_type_fiber_qsfp,
-	ixgbe_media_type_fiber_lco,
-	ixgbe_media_type_copper,
-	ixgbe_media_type_backplane,
-	ixgbe_media_type_cx4,
-	ixgbe_media_type_virtual
-};
-
-/* Flow Control Settings */
-enum ixgbe_fc_mode {
-	ixgbe_fc_none = 0,
-	ixgbe_fc_rx_pause,
-	ixgbe_fc_tx_pause,
-	ixgbe_fc_full,
-	ixgbe_fc_default
-};
-
-/* Smart Speed Settings */
-#define IXGBE_SMARTSPEED_MAX_RETRIES	3
-enum ixgbe_smart_speed {
-	ixgbe_smart_speed_auto = 0,
-	ixgbe_smart_speed_on,
-	ixgbe_smart_speed_off
-};
-
-/* PCI bus types */
-enum ixgbe_bus_type {
-	ixgbe_bus_type_unknown = 0,
-	ixgbe_bus_type_pci,
-	ixgbe_bus_type_pcix,
-	ixgbe_bus_type_pci_express,
-	ixgbe_bus_type_internal,
-	ixgbe_bus_type_reserved
-};
-
-/* PCI bus speeds */
-enum ixgbe_bus_speed {
-	ixgbe_bus_speed_unknown	= 0,
-	ixgbe_bus_speed_33	= 33,
-	ixgbe_bus_speed_66	= 66,
-	ixgbe_bus_speed_100	= 100,
-	ixgbe_bus_speed_120	= 120,
-	ixgbe_bus_speed_133	= 133,
-	ixgbe_bus_speed_2500	= 2500,
-	ixgbe_bus_speed_5000	= 5000,
-	ixgbe_bus_speed_8000	= 8000,
-	ixgbe_bus_speed_reserved
-};
-
-/* PCI bus widths */
-enum ixgbe_bus_width {
-	ixgbe_bus_width_unknown	= 0,
-	ixgbe_bus_width_pcie_x1	= 1,
-	ixgbe_bus_width_pcie_x2	= 2,
-	ixgbe_bus_width_pcie_x4	= 4,
-	ixgbe_bus_width_pcie_x8	= 8,
-	ixgbe_bus_width_32	= 32,
-	ixgbe_bus_width_64	= 64,
-	ixgbe_bus_width_reserved
-};
-
-struct ixgbe_addr_filter_info {
-	u32 num_mc_addrs;
-	u32 rar_used_count;
-	u32 mta_in_use;
-	u32 overflow_promisc;
-	bool user_set_promisc;
-};
-
-/* Bus parameters */
-struct ixgbe_bus_info {
-	enum ixgbe_bus_speed speed;
-	enum ixgbe_bus_width width;
-	enum ixgbe_bus_type type;
-
-	u16 func;
-	u16 lan_id;
-};
-
-/* Flow control parameters */
-struct ixgbe_fc_info {
-	u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
-	u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
-	u16 pause_time; /* Flow Control Pause timer */
-	bool send_xon; /* Flow control send XON */
-	bool strict_ieee; /* Strict IEEE mode */
-	bool disable_fc_autoneg; /* Do not autonegotiate FC */
-	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
-	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
-	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-/* Statistics counters collected by the MAC */
-struct ixgbe_hw_stats {
-	u64 crcerrs;
-	u64 illerrc;
-	u64 errbc;
-	u64 mspdc;
-	u64 mpctotal;
-	u64 mpc[8];
-	u64 mlfc;
-	u64 mrfc;
-	u64 rlec;
-	u64 lxontxc;
-	u64 lxonrxc;
-	u64 lxofftxc;
-	u64 lxoffrxc;
-	u64 pxontxc[8];
-	u64 pxonrxc[8];
-	u64 pxofftxc[8];
-	u64 pxoffrxc[8];
-	u64 prc64;
-	u64 prc127;
-	u64 prc255;
-	u64 prc511;
-	u64 prc1023;
-	u64 prc1522;
-	u64 gprc;
-	u64 bprc;
-	u64 mprc;
-	u64 gptc;
-	u64 gorc;
-	u64 gotc;
-	u64 rnbc[8];
-	u64 ruc;
-	u64 rfc;
-	u64 roc;
-	u64 rjc;
-	u64 mngprc;
-	u64 mngpdc;
-	u64 mngptc;
-	u64 tor;
-	u64 tpr;
-	u64 tpt;
-	u64 ptc64;
-	u64 ptc127;
-	u64 ptc255;
-	u64 ptc511;
-	u64 ptc1023;
-	u64 ptc1522;
-	u64 mptc;
-	u64 bptc;
-	u64 xec;
-	u64 qprc[16];
-	u64 qptc[16];
-	u64 qbrc[16];
-	u64 qbtc[16];
-	u64 qprdc[16];
-	u64 pxon2offc[8];
-	u64 fdirustat_add;
-	u64 fdirustat_remove;
-	u64 fdirfstat_fadd;
-	u64 fdirfstat_fremove;
-	u64 fdirmatch;
-	u64 fdirmiss;
-	u64 fccrc;
-	u64 fclast;
-	u64 fcoerpdc;
-	u64 fcoeprc;
-	u64 fcoeptc;
-	u64 fcoedwrc;
-	u64 fcoedwtc;
-	u64 fcoe_noddp;
-	u64 fcoe_noddp_ext_buff;
-	u64 ldpcec;
-	u64 pcrc8ec;
-	u64 b2ospc;
-	u64 b2ogprc;
-	u64 o2bgptc;
-	u64 o2bspc;
-};
-
-/* forward declaration */
-struct ixgbe_hw;
-
-/* iterator type for walking multicast address lists */
-typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
-				  u32 *vmdq);
-
-/* Function pointer table */
-struct ixgbe_eeprom_operations {
-	s32 (*init_params)(struct ixgbe_hw *);
-	s32 (*read)(struct ixgbe_hw *, u16, u16 *);
-	s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
-	s32 (*write)(struct ixgbe_hw *, u16, u16);
-	s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
-	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
-	s32 (*update_checksum)(struct ixgbe_hw *);
-	s32 (*calc_checksum)(struct ixgbe_hw *);
-};
-
-struct ixgbe_mac_operations {
-	s32 (*init_hw)(struct ixgbe_hw *);
-	s32 (*reset_hw)(struct ixgbe_hw *);
-	s32 (*start_hw)(struct ixgbe_hw *);
-	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
-	void (*enable_relaxed_ordering)(struct ixgbe_hw *);
-	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
-	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
-	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
-	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
-	s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
-	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
-	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
-	s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
-	s32 (*stop_adapter)(struct ixgbe_hw *);
-	s32 (*get_bus_info)(struct ixgbe_hw *);
-	void (*set_lan_id)(struct ixgbe_hw *);
-	s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
-	s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
-	s32 (*setup_sfp)(struct ixgbe_hw *);
-	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
-	s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
-	s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
-	s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
-	void (*release_swfw_sync)(struct ixgbe_hw *, u32);
-	s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
-	s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
-
-	/* Link */
-	void (*disable_tx_laser)(struct ixgbe_hw *);
-	void (*enable_tx_laser)(struct ixgbe_hw *);
-	void (*flap_tx_laser)(struct ixgbe_hw *);
-	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
-	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
-	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
-				     bool *);
-
-	/* Packet Buffer manipulation */
-	void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
-
-	/* LED */
-	s32 (*led_on)(struct ixgbe_hw *, u32);
-	s32 (*led_off)(struct ixgbe_hw *, u32);
-	s32 (*blink_led_start)(struct ixgbe_hw *, u32);
-	s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
-
-	/* RAR, Multicast, VLAN */
-	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
-	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
-	s32 (*clear_rar)(struct ixgbe_hw *, u32);
-	s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
-	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
-	s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
-	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
-	s32 (*init_rx_addrs)(struct ixgbe_hw *);
-	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
-				   ixgbe_mc_addr_itr);
-	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
-				   ixgbe_mc_addr_itr, bool clear);
-	s32 (*enable_mc)(struct ixgbe_hw *);
-	s32 (*disable_mc)(struct ixgbe_hw *);
-	s32 (*clear_vfta)(struct ixgbe_hw *);
-	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
-	s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
-	s32 (*init_uta_tables)(struct ixgbe_hw *);
-	void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
-	void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
-
-	/* Flow Control */
-	s32 (*fc_enable)(struct ixgbe_hw *);
-
-	/* Manageability interface */
-	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
-	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
-	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
-	s32 (*dmac_config)(struct ixgbe_hw *hw);
-	s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
-	s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
-	void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
-	s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
-	void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
-	void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
-					   unsigned int);
-	void (*disable_rx)(struct ixgbe_hw *hw);
-	void (*enable_rx)(struct ixgbe_hw *hw);
-	s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
-	s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
-	void (*disable_mdd)(struct ixgbe_hw *hw);
-	void (*enable_mdd)(struct ixgbe_hw *hw);
-	void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
-	void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
-};
-
-struct ixgbe_phy_operations {
-	s32 (*identify)(struct ixgbe_hw *);
-	s32 (*identify_sfp)(struct ixgbe_hw *);
-	s32 (*init)(struct ixgbe_hw *);
-	s32 (*reset)(struct ixgbe_hw *);
-	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
-	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
-	s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
-	s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
-	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*setup_internal_link)(struct ixgbe_hw *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
-	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
-	s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
-	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
-	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
-	s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
-	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
-	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
-	void (*i2c_bus_clear)(struct ixgbe_hw *);
-	s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
-	s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
-	s32 (*check_overtemp)(struct ixgbe_hw *);
-	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
-};
-
-struct ixgbe_eeprom_info {
-	struct ixgbe_eeprom_operations ops;
-	enum ixgbe_eeprom_type type;
-	u32 semaphore_delay;
-	u16 word_size;
-	u16 address_bits;
-	u16 word_page_size;
-};
-
-#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
-struct ixgbe_mac_info {
-	struct ixgbe_mac_operations ops;
-	enum ixgbe_mac_type type;
-	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-	/* prefix for World Wide Node Name (WWNN) */
-	u16 wwnn_prefix;
-	/* prefix for World Wide Port Name (WWPN) */
-	u16 wwpn_prefix;
-#define IXGBE_MAX_MTA			128
-	u32 mta_shadow[IXGBE_MAX_MTA];
-	s32 mc_filter_type;
-	u32 mcft_size;
-	u32 vft_size;
-	u32 num_rar_entries;
-	u32 rar_highwater;
-	u32 rx_pb_size;
-	u32 max_tx_queues;
-	u32 max_rx_queues;
-	u32 orig_autoc;
-	u8  san_mac_rar_index;
-	bool get_link_status;
-	u32 orig_autoc2;
-	u16 max_msix_vectors;
-	bool arc_subsystem_valid;
-	bool orig_link_settings_stored;
-	bool autotry_restart;
-	u8 flags;
-	struct ixgbe_thermal_sensor_data  thermal_sensor_data;
-	bool thermal_sensor_enabled;
-	struct ixgbe_dmac_config dmac_config;
-	bool set_lben;
-};
-
-struct ixgbe_phy_info {
-	struct ixgbe_phy_operations ops;
-	enum ixgbe_phy_type type;
-	u32 addr;
-	u32 id;
-	enum ixgbe_sfp_type sfp_type;
-	bool sfp_setup_needed;
-	u32 revision;
-	enum ixgbe_media_type media_type;
-	u32 phy_semaphore_mask;
-	bool reset_disable;
-	ixgbe_autoneg_advertised autoneg_advertised;
-	enum ixgbe_smart_speed smart_speed;
-	bool smart_speed_active;
-	bool multispeed_fiber;
-	bool reset_if_overtemp;
-	bool qsfp_shared_i2c_bus;
-};
-
-#include "ixgbe_mbx.h"
-
-struct ixgbe_mbx_operations {
-	void (*init_params)(struct ixgbe_hw *hw);
-	s32  (*read)(struct ixgbe_hw *, u32 *, u16,  u16);
-	s32  (*write)(struct ixgbe_hw *, u32 *, u16, u16);
-	s32  (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16);
-	s32  (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
-	s32  (*check_for_msg)(struct ixgbe_hw *, u16);
-	s32  (*check_for_ack)(struct ixgbe_hw *, u16);
-	s32  (*check_for_rst)(struct ixgbe_hw *, u16);
-};
-
-struct ixgbe_mbx_stats {
-	u32 msgs_tx;
-	u32 msgs_rx;
-
-	u32 acks;
-	u32 reqs;
-	u32 rsts;
-};
-
-struct ixgbe_mbx_info {
-	struct ixgbe_mbx_operations ops;
-	struct ixgbe_mbx_stats stats;
-	u32 timeout;
-	u32 usec_delay;
-	u32 v2p_mailbox;
-	u16 size;
-};
-
-struct ixgbe_hw {
-	u8 IOMEM *hw_addr;
-	void *back;
-	struct ixgbe_mac_info mac;
-	struct ixgbe_addr_filter_info addr_ctrl;
-	struct ixgbe_fc_info fc;
-	struct ixgbe_phy_info phy;
-	struct ixgbe_eeprom_info eeprom;
-	struct ixgbe_bus_info bus;
-	struct ixgbe_mbx_info mbx;
-	u16 device_id;
-	u16 vendor_id;
-	u16 subsystem_device_id;
-	u16 subsystem_vendor_id;
-	u8 revision_id;
-	bool adapter_stopped;
-	int api_version;
-	bool force_full_reset;
-	bool allow_unsupported_sfp;
-	bool wol_enabled;
-};
-
-#define ixgbe_call_func(hw, func, params, error) \
-		(func != NULL) ? func params : error
-
-
-/* Error Codes */
-#define IXGBE_SUCCESS				0
-#define IXGBE_ERR_EEPROM			-1
-#define IXGBE_ERR_EEPROM_CHECKSUM		-2
-#define IXGBE_ERR_PHY				-3
-#define IXGBE_ERR_CONFIG			-4
-#define IXGBE_ERR_PARAM				-5
-#define IXGBE_ERR_MAC_TYPE			-6
-#define IXGBE_ERR_UNKNOWN_PHY			-7
-#define IXGBE_ERR_LINK_SETUP			-8
-#define IXGBE_ERR_ADAPTER_STOPPED		-9
-#define IXGBE_ERR_INVALID_MAC_ADDR		-10
-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED		-11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING	-12
-#define IXGBE_ERR_INVALID_LINK_SETTINGS		-13
-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE		-14
-#define IXGBE_ERR_RESET_FAILED			-15
-#define IXGBE_ERR_SWFW_SYNC			-16
-#define IXGBE_ERR_PHY_ADDR_INVALID		-17
-#define IXGBE_ERR_I2C				-18
-#define IXGBE_ERR_SFP_NOT_SUPPORTED		-19
-#define IXGBE_ERR_SFP_NOT_PRESENT		-20
-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT	-21
-#define IXGBE_ERR_NO_SAN_ADDR_PTR		-22
-#define IXGBE_ERR_FDIR_REINIT_FAILED		-23
-#define IXGBE_ERR_EEPROM_VERSION		-24
-#define IXGBE_ERR_NO_SPACE			-25
-#define IXGBE_ERR_OVERTEMP			-26
-#define IXGBE_ERR_FC_NOT_NEGOTIATED		-27
-#define IXGBE_ERR_FC_NOT_SUPPORTED		-28
-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE	-30
-#define IXGBE_ERR_PBA_SECTION			-31
-#define IXGBE_ERR_INVALID_ARGUMENT		-32
-#define IXGBE_ERR_HOST_INTERFACE_COMMAND	-33
-#define IXGBE_ERR_OUT_OF_MEM			-34
-#define IXGBE_ERR_FEATURE_NOT_SUPPORTED		-36
-#define IXGBE_ERR_EEPROM_PROTECTED_REGION	-37
-#define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
-
-#define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF
-
-
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P)	((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P)		((P == 0) ? (0x5A00) : (0x9A00))
-
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		(1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		(1 << 11)
-
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		(1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		(1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		(1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		(1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		(1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		(1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		(1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART		(1 << 31)
-
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN			(1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		(1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		(1 << 16)
-
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	(1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	(1 << 2)
-
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(0x3 << 16)
-
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	(1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	(1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN		(1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		(1 << 31)
-
-#define IXGBE_KX4_LINK_CNTL_1				0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX		(1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4		(1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX		(1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4		(1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE		(1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP	(1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART		(1 << 31)
-
-#define IXGBE_SB_IOSF_INDIRECT_CTRL	0x00011144
-#define IXGBE_SB_IOSF_INDIRECT_DATA	0x00011148
-
-#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT		0
-#define IXGBE_SB_IOSF_CTRL_ADDR_MASK		0xFF
-#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT	18
-#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK	\
-				(0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
-#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT	20
-#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK	\
-				(0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
-#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
-#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
-#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT		31
-#define IXGBE_SB_IOSF_CTRL_BUSY		(1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
-#define IXGBE_SB_IOSF_TARGET_KR_PHY	0
-#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY	1
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS0	2
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS1	3
-
-#endif /* _IXGBE_TYPE_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
deleted file mode 100644
index e99b17d..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
+++ /dev/null
@@ -1,724 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-
-#include "ixgbe_api.h"
-#include "ixgbe_type.h"
-#include "ixgbe_vf.h"
-
-#ifndef IXGBE_VFWRITE_REG
-#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
-#endif
-#ifndef IXGBE_VFREAD_REG
-#define IXGBE_VFREAD_REG IXGBE_READ_REG
-#endif
-
-/**
- *  ixgbe_init_ops_vf - Initialize the pointers for vf
- *  @hw: pointer to hardware structure
- *
- *  This will assign function pointers, adapter-specific functions can
- *  override the assignment of generic function pointers by assigning
- *  their own adapter-specific function pointers.
- *  Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
-{
-	/* MAC */
-	hw->mac.ops.init_hw = ixgbe_init_hw_vf;
-	hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
-	hw->mac.ops.start_hw = ixgbe_start_hw_vf;
-	/* Cannot clear stats on VF */
-	hw->mac.ops.clear_hw_cntrs = NULL;
-	hw->mac.ops.get_media_type = NULL;
-	hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
-	hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
-	hw->mac.ops.get_bus_info = NULL;
-
-	/* Link */
-	hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
-	hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
-	hw->mac.ops.get_link_capabilities = NULL;
-
-	/* RAR, Multicast, VLAN */
-	hw->mac.ops.set_rar = ixgbe_set_rar_vf;
-	hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
-	hw->mac.ops.init_rx_addrs = NULL;
-	hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
-	hw->mac.ops.enable_mc = NULL;
-	hw->mac.ops.disable_mc = NULL;
-	hw->mac.ops.clear_vfta = NULL;
-	hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
-
-	hw->mac.max_tx_queues = 1;
-	hw->mac.max_rx_queues = 1;
-
-	hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
-
-	return IXGBE_SUCCESS;
-}
-
-/* ixgbe_virt_clr_reg - Set register to default (power on) state.
- *  @hw: pointer to hardware structure
- */
-static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
-{
-	int i;
-	u32 vfsrrctl;
-	u32 vfdca_rxctrl;
-	u32 vfdca_txctrl;
-
-	/* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
-	vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-
-	/* DCA_RXCTRL default value */
-	vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
-		       IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-		       IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
-
-	/* DCA_TXCTRL default value */
-	vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
-		       IXGBE_DCA_TXCTRL_DESC_WRO_EN |
-		       IXGBE_DCA_TXCTRL_DATA_RRO_EN;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-
-	for (i = 0; i < 7; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
-	}
-
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- *  ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware by filling the bus info structure and media type, clears
- *  all on chip counters, initializes receive address registers, multicast
- *  table, VLAN filter table, calls routine to set up link and flow control
- *  settings, and leaves transmit and receive units disabled and uninitialized
- **/
-s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
-{
-	/* Clear adapter stopped flag */
-	hw->adapter_stopped = false;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_hw_vf - virtual function hardware initialization
- *  @hw: pointer to hardware structure
- *
- *  Initialize the hardware by resetting the hardware and then starting
- *  the hardware
- **/
-s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
-{
-	s32 status = hw->mac.ops.start_hw(hw);
-
-	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
-
-	return status;
-}
-
-/**
- *  ixgbe_reset_hw_vf - Performs hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks and
- *  clears all interrupts.
- **/
-s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
-	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
-	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
-	u8 *addr = (u8 *)(&msgbuf[1]);
-
-	DEBUGFUNC("ixgbevf_reset_hw_vf");
-
-	/* Call adapter stop to disable tx/rx and clear interrupts */
-	hw->mac.ops.stop_adapter(hw);
-
-	/* reset the api version */
-	hw->api_version = ixgbe_mbox_api_10;
-
-	DEBUGOUT("Issuing a function level reset to MAC\n");
-
-	IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
-	IXGBE_WRITE_FLUSH(hw);
-
-	msec_delay(50);
-
-	/* we cannot reset while the RSTI / RSTD bits are asserted */
-	while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
-		timeout--;
-		usec_delay(5);
-	}
-
-	if (!timeout)
-		return IXGBE_ERR_RESET_FAILED;
-
-	/* Reset VF registers to initial values */
-	ixgbe_virt_clr_reg(hw);
-
-	/* mailbox timeout can now become active */
-	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
-
-	msgbuf[0] = IXGBE_VF_RESET;
-	mbx->ops.write_posted(hw, msgbuf, 1, 0);
-
-	msec_delay(10);
-
-	/*
-	 * set our "perm_addr" based on info provided by PF
-	 * also set up the mc_filter_type which is piggybacked
-	 * on the mac address in word 3
-	 */
-	ret_val = mbx->ops.read_posted(hw, msgbuf,
-			IXGBE_VF_PERMADDR_MSG_LEN, 0);
-	if (ret_val)
-		return ret_val;
-
-	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
-	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
-		return IXGBE_ERR_INVALID_MAC_ADDR;
-
-	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
-	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
- *  @hw: pointer to hardware structure
- *
- *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
- *  disables transmit and receive units. The adapter_stopped flag is used by
- *  the shared code and drivers to determine if the adapter is in a stopped
- *  state and should not touch the hardware.
- **/
-s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
-{
-	u32 reg_val;
-	u16 i;
-
-	/*
-	 * Set the adapter_stopped flag so other driver functions stop touching
-	 * the hardware
-	 */
-	hw->adapter_stopped = true;
-
-	/* Clear interrupt mask to stop interrupts from being generated */
-	IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
-
-	/* Clear any pending interrupts, flush previous writes */
-	IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
-
-	/* Disable the transmit unit.  Each queue must be disabled. */
-	for (i = 0; i < hw->mac.max_tx_queues; i++)
-		IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
-
-	/* Disable the receive unit by stopping each queue */
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
-		reg_val &= ~IXGBE_RXDCTL_ENABLE;
-		IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
-	}
-	/* Clear packet split and pool config */
-	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-
-	/* flush all queues disables */
-	IXGBE_WRITE_FLUSH(hw);
-	msec_delay(2);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
- *  @hw: pointer to hardware structure
- *  @mc_addr: the multicast address
- *
- *  Extracts the 12 bits, from a multicast address, to determine which
- *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
- *  incoming rx multicast addresses, to determine the bit-vector to check in
- *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
- *  by the MO field of the MCSTCTRL. The MO field is set during initialization
- *  to mc_filter_type.
- **/
-STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
-{
-	u32 vector = 0;
-
-	switch (hw->mac.mc_filter_type) {
-	case 0:   /* use bits [47:36] of the address */
-		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
-		break;
-	case 1:   /* use bits [46:35] of the address */
-		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
-		break;
-	case 2:   /* use bits [45:34] of the address */
-		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
-		break;
-	case 3:   /* use bits [43:32] of the address */
-		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
-		break;
-	default:  /* Invalid mc_filter_type */
-		DEBUGOUT("MC filter type param set incorrectly\n");
-		ASSERT(0);
-		break;
-	}
-
-	/* vector can only be 12-bits or boundary will be exceeded */
-	vector &= 0xFFF;
-	return vector;
-}
-
-STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
-					u32 *msg, u16 size)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
-	s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
-
-	if (!retval)
-		mbx->ops.read_posted(hw, retmsg, size, 0);
-}
-
-/**
- *  ixgbe_set_rar_vf - set device MAC address
- *  @hw: pointer to hardware structure
- *  @index: Receive address register to write
- *  @addr: Address to put into receive address register
- *  @vmdq: VMDq "set" or "pool" index
- *  @enable_addr: set flag that address is active
- **/
-s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-		     u32 enable_addr)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 msgbuf[3];
-	u8 *msg_addr = (u8 *)(&msgbuf[1]);
-	s32 ret_val;
-	UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
-
-	memset(msgbuf, 0, 12);
-	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-	memcpy(msg_addr, addr, 6);
-	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
-
-	if (!ret_val)
-		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
-
-	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
-	/* if nacked the address was rejected, use "perm_addr" */
-	if (!ret_val &&
-	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
-		ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_update_mc_addr_list_vf - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
- *  @next: caller supplied function to return next address in list
- *
- *  Updates the Multicast Table Array.
- **/
-s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
-				 u32 mc_addr_count, ixgbe_mc_addr_itr next,
-				 bool clear)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
-	u16 *vector_list = (u16 *)&msgbuf[1];
-	u32 vector;
-	u32 cnt, i;
-	u32 vmdq;
-
-	UNREFERENCED_1PARAMETER(clear);
-
-	DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
-
-	/* Each entry in the list uses 1 16 bit word.  We have 30
-	 * 16 bit words available in our HW msg buffer (minus 1 for the
-	 * msg type).  That's 30 hash values if we pack 'em right.  If
-	 * there are more than 30 MC addresses to add then punt the
-	 * extras for now and then add code to handle more than 30 later.
-	 * It would be unusual for a server to request that many multi-cast
-	 * addresses except for in large enterprise network environments.
-	 */
-
-	DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
-
-	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
-	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
-	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
-
-	for (i = 0; i < cnt; i++) {
-		vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
-		DEBUGOUT1("Hash value = 0x%03X\n", vector);
-		vector_list[i] = (u16)vector;
-	}
-
-	return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
-}
-
-/**
- *  ixgbe_set_vfta_vf - Set/Unset vlan filter table address
- *  @hw: pointer to the HW structure
- *  @vlan: 12 bit VLAN ID
- *  @vind: unused by VF drivers
- *  @vlan_on: if true then set bit, else clear bit
- **/
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 msgbuf[2];
-	s32 ret_val;
-	UNREFERENCED_1PARAMETER(vind);
-
-	msgbuf[0] = IXGBE_VF_SET_VLAN;
-	msgbuf[1] = vlan;
-	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
-	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
-
-	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
-	if (!ret_val)
-		ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
-
-	if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
-		return IXGBE_SUCCESS;
-
-	return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
-}
-
-/**
- *  ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
- *  @hw: pointer to hardware structure
- *
- *  Returns the number of transmit queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
-{
-	UNREFERENCED_1PARAMETER(hw);
-	return IXGBE_VF_MAX_TX_QUEUES;
-}
-
-/**
- *  ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
- *  @hw: pointer to hardware structure
- *
- *  Returns the number of receive queues for the given adapter.
- **/
-u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
-{
-	UNREFERENCED_1PARAMETER(hw);
-	return IXGBE_VF_MAX_RX_QUEUES;
-}
-
-/**
- *  ixgbe_get_mac_addr_vf - Read device MAC address
- *  @hw: pointer to the HW structure
- **/
-s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
-		mac_addr[i] = hw->mac.perm_addr[i];
-
-	return IXGBE_SUCCESS;
-}
-
-s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 msgbuf[3];
-	u8 *msg_addr = (u8 *)(&msgbuf[1]);
-	s32 ret_val;
-
-	memset(msgbuf, 0, sizeof(msgbuf));
-	/*
-	 * If index is one then this is the start of a new list and needs
-	 * indication to the PF so it can do its own list management.
-	 * If it is zero then that tells the PF to just clear all of
-	 * this VF's macvlans and there is no new list.
-	 */
-	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
-	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
-	if (addr)
-		memcpy(msg_addr, addr, 6);
-	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
-
-	if (!ret_val)
-		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
-
-	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
-	if (!ret_val)
-		if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
-			ret_val = IXGBE_ERR_OUT_OF_MEM;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_setup_mac_link_vf - Setup MAC link settings
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg: true if autonegotiation enabled
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			    bool autoneg_wait_to_complete)
-{
-	UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_check_mac_link_vf - Get link/speed status
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @link_up: true if link is up, false otherwise
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Reads the links register to determine if link is up and the current speed
- **/
-s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-			    bool *link_up, bool autoneg_wait_to_complete)
-{
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	struct ixgbe_mac_info *mac = &hw->mac;
-	s32 ret_val = IXGBE_SUCCESS;
-	u32 links_reg;
-	u32 in_msg = 0;
-	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
-
-	/* If we were hit with a reset drop the link */
-	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
-		mac->get_link_status = true;
-
-	if (!mac->get_link_status)
-		goto out;
-
-	/* if link status is down no point in checking to see if pf is up */
-	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-	if (!(links_reg & IXGBE_LINKS_UP))
-		goto out;
-
-	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
-	 * before the link status is correct
-	 */
-	if (mac->type == ixgbe_mac_82599_vf) {
-		int i;
-
-		for (i = 0; i < 5; i++) {
-			usec_delay(100);
-			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
-			if (!(links_reg & IXGBE_LINKS_UP))
-				goto out;
-		}
-	}
-
-	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
-	case IXGBE_LINKS_SPEED_10G_82599:
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	case IXGBE_LINKS_SPEED_1G_82599:
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	case IXGBE_LINKS_SPEED_100_82599:
-		*speed = IXGBE_LINK_SPEED_100_FULL;
-		break;
-	}
-
-	/* if the read failed it could just be a mailbox collision, best wait
-	 * until we are called again and don't report an error
-	 */
-	if (mbx->ops.read(hw, &in_msg, 1, 0))
-		goto out;
-
-	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
-		/* msg is not CTS; if it is a NACK we must have lost CTS status */
-		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
-			ret_val = -1;
-		goto out;
-	}
-
-	/* the pf is talking, if we timed out in the past we reinit */
-	if (!mbx->timeout) {
-		ret_val = -1;
-		goto out;
-	}
-
-	/* if we passed all the tests above then the link is up and we no
-	 * longer need to check for link
-	 */
-	mac->get_link_status = false;
-
-out:
-	*link_up = !mac->get_link_status;
-	return ret_val;
-}
-
-/**
- *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
- *  @hw: pointer to the HW structure
- *  @max_size: value to assign to max frame size
- **/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
-{
-	u32 msgbuf[2];
-
-	msgbuf[0] = IXGBE_VF_SET_LPE;
-	msgbuf[1] = max_size;
-	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
-}
-
-/**
- *  ixgbevf_negotiate_api_version - Negotiate supported API version
- *  @hw: pointer to the HW structure
- *  @api: integer containing requested API version
- **/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
-{
-	int err;
-	u32 msg[3];
-
-	/* Negotiate the mailbox API version */
-	msg[0] = IXGBE_VF_API_NEGOTIATE;
-	msg[1] = api;
-	msg[2] = 0;
-	err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
-
-	if (!err)
-		err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
-
-	if (!err) {
-		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
-		/* Store value and return 0 on success */
-		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
-			hw->api_version = api;
-			return 0;
-		}
-
-		err = IXGBE_ERR_INVALID_ARGUMENT;
-	}
-
-	return err;
-}
-
-int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
-		       unsigned int *default_tc)
-{
-	int err;
-	u32 msg[5];
-
-	/* do nothing if API doesn't support ixgbevf_get_queues */
-	switch (hw->api_version) {
-	case ixgbe_mbox_api_11:
-		break;
-	default:
-		return 0;
-	}
-
-	/* Fetch queue configuration from the PF */
-	msg[0] = IXGBE_VF_GET_QUEUES;
-	msg[1] = msg[2] = msg[3] = msg[4] = 0;
-	err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
-
-	if (!err)
-		err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
-
-	if (!err) {
-		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
-		/*
-		 * if we didn't get an ACK there must have been
-		 * some sort of mailbox error so we should treat it
-		 * as such
-		 */
-		if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
-			return IXGBE_ERR_MBX;
-
-		/* record and validate values from message */
-		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
-		if (hw->mac.max_tx_queues == 0 ||
-		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
-			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
-
-		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
-		if (hw->mac.max_rx_queues == 0 ||
-		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
-			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
-
-		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
-		/* in case of unknown state assume we cannot tag frames */
-		if (*num_tcs > hw->mac.max_rx_queues)
-			*num_tcs = 1;
-
-		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
-		/* default to queue 0 on out-of-bounds queue number */
-		if (*default_tc >= hw->mac.max_tx_queues)
-			*default_tc = 0;
-	}
-
-	return err;
-}
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
deleted file mode 100644
index ae7d58f..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
+++ /dev/null
@@ -1,140 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef __IXGBE_VF_H__
-#define __IXGBE_VF_H__
-
-#define IXGBE_VF_IRQ_CLEAR_MASK	7
-#define IXGBE_VF_MAX_TX_QUEUES	8
-#define IXGBE_VF_MAX_RX_QUEUES	8
-
-/* DCB define */
-#define IXGBE_VF_MAX_TRAFFIC_CLASS	8
-
-#define IXGBE_VFCTRL		0x00000
-#define IXGBE_VFSTATUS		0x00008
-#define IXGBE_VFLINKS		0x00010
-#define IXGBE_VFFRTIMER		0x00048
-#define IXGBE_VFRXMEMWRAP	0x03190
-#define IXGBE_VTEICR		0x00100
-#define IXGBE_VTEICS		0x00104
-#define IXGBE_VTEIMS		0x00108
-#define IXGBE_VTEIMC		0x0010C
-#define IXGBE_VTEIAC		0x00110
-#define IXGBE_VTEIAM		0x00114
-#define IXGBE_VTEITR(x)		(0x00820 + (4 * (x)))
-#define IXGBE_VTIVAR(x)		(0x00120 + (4 * (x)))
-#define IXGBE_VTIVAR_MISC	0x00140
-#define IXGBE_VTRSCINT(x)	(0x00180 + (4 * (x)))
-/* define IXGBE_VFPBACL  still says TBD in EAS */
-#define IXGBE_VFRDBAL(x)	(0x01000 + (0x40 * (x)))
-#define IXGBE_VFRDBAH(x)	(0x01004 + (0x40 * (x)))
-#define IXGBE_VFRDLEN(x)	(0x01008 + (0x40 * (x)))
-#define IXGBE_VFRDH(x)		(0x01010 + (0x40 * (x)))
-#define IXGBE_VFRDT(x)		(0x01018 + (0x40 * (x)))
-#define IXGBE_VFRXDCTL(x)	(0x01028 + (0x40 * (x)))
-#define IXGBE_VFSRRCTL(x)	(0x01014 + (0x40 * (x)))
-#define IXGBE_VFRSCCTL(x)	(0x0102C + (0x40 * (x)))
-#define IXGBE_VFPSRTYPE		0x00300
-#define IXGBE_VFTDBAL(x)	(0x02000 + (0x40 * (x)))
-#define IXGBE_VFTDBAH(x)	(0x02004 + (0x40 * (x)))
-#define IXGBE_VFTDLEN(x)	(0x02008 + (0x40 * (x)))
-#define IXGBE_VFTDH(x)		(0x02010 + (0x40 * (x)))
-#define IXGBE_VFTDT(x)		(0x02018 + (0x40 * (x)))
-#define IXGBE_VFTXDCTL(x)	(0x02028 + (0x40 * (x)))
-#define IXGBE_VFTDWBAL(x)	(0x02038 + (0x40 * (x)))
-#define IXGBE_VFTDWBAH(x)	(0x0203C + (0x40 * (x)))
-#define IXGBE_VFDCA_RXCTRL(x)	(0x0100C + (0x40 * (x)))
-#define IXGBE_VFDCA_TXCTRL(x)	(0x0200c + (0x40 * (x)))
-#define IXGBE_VFGPRC		0x0101C
-#define IXGBE_VFGPTC		0x0201C
-#define IXGBE_VFGORC_LSB	0x01020
-#define IXGBE_VFGORC_MSB	0x01024
-#define IXGBE_VFGOTC_LSB	0x02020
-#define IXGBE_VFGOTC_MSB	0x02024
-#define IXGBE_VFMPRC		0x01034
-#define IXGBE_VFMRQC		0x3000
-#define IXGBE_VFRSSRK(x)	(0x3100 + ((x) * 4))
-#define IXGBE_VFRETA(x)	(0x3200 + ((x) * 4))
-
-
-struct ixgbevf_hw_stats {
-	u64 base_vfgprc;
-	u64 base_vfgptc;
-	u64 base_vfgorc;
-	u64 base_vfgotc;
-	u64 base_vfmprc;
-
-	u64 last_vfgprc;
-	u64 last_vfgptc;
-	u64 last_vfgorc;
-	u64 last_vfgotc;
-	u64 last_vfmprc;
-
-	u64 vfgprc;
-	u64 vfgptc;
-	u64 vfgorc;
-	u64 vfgotc;
-	u64 vfmprc;
-
-	u64 saved_reset_vfgprc;
-	u64 saved_reset_vfgptc;
-	u64 saved_reset_vfgorc;
-	u64 saved_reset_vfgotc;
-	u64 saved_reset_vfmprc;
-};
-
-s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
-u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
-s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			    bool autoneg_wait_to_complete);
-s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-			    bool *link_up, bool autoneg_wait_to_complete);
-s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
-		     u32 enable_addr);
-s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
-s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
-				 u32 mc_addr_count, ixgbe_mc_addr_itr,
-				 bool clear);
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
-int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
-		       unsigned int *default_tc);
-
-#endif /* __IXGBE_VF_H__ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
deleted file mode 100644
index 6e8835d..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
+++ /dev/null
@@ -1,1040 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_x540.h"
-#include "ixgbe_type.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-#define IXGBE_X540_MAX_TX_QUEUES	128
-#define IXGBE_X540_MAX_RX_QUEUES	128
-#define IXGBE_X540_RAR_ENTRIES		128
-#define IXGBE_X540_MC_TBL_SIZE		128
-#define IXGBE_X540_VFT_TBL_SIZE		128
-#define IXGBE_X540_RX_PB_SIZE		384
-
-STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
-STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
-
-/**
- *  ixgbe_init_ops_X540 - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers and assign the MAC type for X540.
- *  Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_init_ops_X540");
-
-	ret_val = ixgbe_init_phy_ops_generic(hw);
-	ret_val = ixgbe_init_ops_generic(hw);
-
-
-	/* EEPROM */
-	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
-	eeprom->ops.read = ixgbe_read_eerd_X540;
-	eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
-	eeprom->ops.write = ixgbe_write_eewr_X540;
-	eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
-	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
-	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
-	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
-
-	/* PHY */
-	phy->ops.init = ixgbe_init_phy_ops_generic;
-	phy->ops.reset = NULL;
-	if (!ixgbe_mng_present(hw))
-		phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
-
-	/* MAC */
-	mac->ops.reset_hw = ixgbe_reset_hw_X540;
-	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
-	mac->ops.get_media_type = ixgbe_get_media_type_X540;
-	mac->ops.get_supported_physical_layer =
-				    ixgbe_get_supported_physical_layer_X540;
-	mac->ops.read_analog_reg8 = NULL;
-	mac->ops.write_analog_reg8 = NULL;
-	mac->ops.start_hw = ixgbe_start_hw_X540;
-	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
-	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
-	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
-	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
-	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
-	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
-	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
-	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
-	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
-
-	/* RAR, Multicast, VLAN */
-	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
-	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
-	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
-	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
-	mac->rar_highwater = 1;
-	mac->ops.set_vfta = ixgbe_set_vfta_generic;
-	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
-	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
-	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
-	mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
-	mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
-
-	/* Link */
-	mac->ops.get_link_capabilities =
-				ixgbe_get_copper_link_capabilities_generic;
-	mac->ops.setup_link = ixgbe_setup_mac_link_X540;
-	mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
-	mac->ops.check_link = ixgbe_check_mac_link_generic;
-
-
-	mac->mcft_size		= IXGBE_X540_MC_TBL_SIZE;
-	mac->vft_size		= IXGBE_X540_VFT_TBL_SIZE;
-	mac->num_rar_entries	= IXGBE_X540_RAR_ENTRIES;
-	mac->rx_pb_size		= IXGBE_X540_RX_PB_SIZE;
-	mac->max_rx_queues	= IXGBE_X540_MAX_RX_QUEUES;
-	mac->max_tx_queues	= IXGBE_X540_MAX_TX_QUEUES;
-	mac->max_msix_vectors	= ixgbe_get_pcie_msix_count_generic(hw);
-
-	/*
-	 * FWSM register
-	 * ARC supported; valid only if manageability features are
-	 * enabled.
-	 */
-	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
-				   IXGBE_FWSM_MODE_MASK) ? true : false;
-
-	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
-
-	/* LEDs */
-	mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
-	mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
-
-	/* Manageability interface */
-	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
-
-	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_link_capabilities_X540 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: true when autoneg or autotry is enabled
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
-				     ixgbe_link_speed *speed,
-				     bool *autoneg)
-{
-	ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_get_media_type_X540 - Get media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- **/
-enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
-{
-	UNREFERENCED_1PARAMETER(hw);
-	return ixgbe_media_type_copper;
-}
-
-/**
- *  ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- **/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
-			      ixgbe_link_speed speed,
-			      bool autoneg_wait_to_complete)
-{
-	DEBUGFUNC("ixgbe_setup_mac_link_X540");
-	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
-}
-
-/**
- *  ixgbe_reset_hw_X540 - Perform hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks
- *  and clears all interrupts, and perform a reset.
- **/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u32 ctrl, i;
-
-	DEBUGFUNC("ixgbe_reset_hw_X540");
-
-	/* Call adapter stop to disable tx/rx and clear interrupts */
-	status = hw->mac.ops.stop_adapter(hw);
-	if (status != IXGBE_SUCCESS)
-		goto reset_hw_out;
-
-	/* flush pending Tx transactions */
-	ixgbe_clear_tx_pending(hw);
-
-mac_reset_top:
-	ctrl = IXGBE_CTRL_RST;
-	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Poll for reset bit to self-clear indicating reset is complete */
-	for (i = 0; i < 10; i++) {
-		usec_delay(1);
-		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST_MASK))
-			break;
-	}
-
-	if (ctrl & IXGBE_CTRL_RST_MASK) {
-		status = IXGBE_ERR_RESET_FAILED;
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "Reset polling failed to complete.\n");
-	}
-	msec_delay(100);
-
-	/*
-	 * Double resets are required for recovery from certain error
-	 * conditions.  Between resets, it is necessary to stall to allow time
-	 * for any pending HW events to complete.
-	 */
-	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
-		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		goto mac_reset_top;
-	}
-
-	/* Set the Rx packet buffer size. */
-	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
-
-	/* Store the permanent mac address */
-	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
-	/*
-	 * Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table.  Also reset num_rar_entries to 128,
-	 * since we modify this value when programming the SAN MAC address.
-	 */
-	hw->mac.num_rar_entries = 128;
-	hw->mac.ops.init_rx_addrs(hw);
-
-	/* Store the permanent SAN mac address */
-	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
-
-	/* Add the SAN MAC address to the RAR only if it's a valid address */
-	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
-		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
-		/* Save the SAN MAC RAR index */
-		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
-
-		/* Reserve the last RAR for the SAN MAC address */
-		hw->mac.num_rar_entries--;
-	}
-
-	/* Store the alternative WWNN/WWPN prefix */
-	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
-				   &hw->mac.wwpn_prefix);
-
-reset_hw_out:
-	return status;
-}
-
-/**
- *  ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
- *  @hw: pointer to hardware structure
- *
- *  Starts the hardware using the generic start_hw function
- *  and the gen2 start_hw function.
- *  Then performs revision-specific operations, if any.
- **/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_start_hw_X540");
-
-	ret_val = ixgbe_start_hw_generic(hw);
-	if (ret_val != IXGBE_SUCCESS)
-		goto out;
-
-	ret_val = ixgbe_start_hw_gen2(hw);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u16 ext_ability = 0;
-
-	DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
-
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-	IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
-	if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-	if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-	if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
-		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-
-	return physical_layer;
-}
-
-/**
- *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
- *  @hw: pointer to hardware structure
- *
- *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
- *  ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	u32 eec;
-	u16 eeprom_size;
-
-	DEBUGFUNC("ixgbe_init_eeprom_params_X540");
-
-	if (eeprom->type == ixgbe_eeprom_uninitialized) {
-		eeprom->semaphore_delay = 10;
-		eeprom->type = ixgbe_flash;
-
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
-
-		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
-			  eeprom->type, eeprom->word_size);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
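
The word_size computation in ixgbe_init_eeprom_params_X540() above decodes the
EEC SIZE field into a power-of-two word count. A minimal standalone sketch of
that decode, using illustrative mask and shift values rather than the real
IXGBE_EEC_* / IXGBE_EEPROM_WORD_SIZE_SHIFT definitions from ixgbe_type.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real values live in ixgbe_type.h and
     * may differ.
     */
    #define EEC_SIZE_MASK    0x7800u
    #define EEC_SIZE_SHIFT   11
    #define WORD_SIZE_SHIFT  6

    int main(void)
    {
        uint32_t eec = 0x1800;  /* pretend EEC register readout */
        uint16_t field = (uint16_t)((eec & EEC_SIZE_MASK) >> EEC_SIZE_SHIFT);
        uint16_t word_size = 1u << (field + WORD_SIZE_SHIFT);

        printf("size field %u -> %u words\n", field, word_size); /* 3 -> 512 */
        return 0;
    }
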
-/**
- *  ixgbe_read_eerd_X540 - Read EEPROM word using EERD
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_read_eerd_X540");
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_read_eerd_generic(hw, offset, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words
- *  @data: word(s) read from the EEPROM
- *
- *  Reads one or more 16 bit words from the EEPROM using the EERD register.
- **/
-s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
-				u16 offset, u16 words, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_read_eerd_buffer_generic(hw, offset,
-							words, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @data: word to write to the EEPROM
- *
- *  Writes a 16 bit word to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_write_eewr_X540");
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_write_eewr_generic(hw, offset, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @words: number of words
- *  @data: word(s) to write to the EEPROM
- *
- *  Writes one or more 16 bit words to the EEPROM using the EEWR register.
- **/
-s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
-				 u16 offset, u16 words, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_write_eewr_buffer_generic(hw, offset,
-							 words, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
- *  @hw: pointer to hardware structure
- *
- *  This function does not use synchronization for EERD and EEWR. It can
- *  be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
- *
- *  Returns a negative error code on error, or the 16-bit checksum
- **/
-s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
-{
-	u16 i, j;
-	u16 checksum = 0;
-	u16 length = 0;
-	u16 pointer = 0;
-	u16 word = 0;
-	u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
-	u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
-
-	/* Do not use hw->eeprom.ops.read because we do not want to take
-	 * the synchronization semaphores here. Instead use
-	 * ixgbe_read_eerd_generic
-	 */
-
-	DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
-
-	/* Include 0x0-0x3F in the checksum */
-	for (i = 0; i <= checksum_last_word; i++) {
-		if (ixgbe_read_eerd_generic(hw, i, &word)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-		if (i != IXGBE_EEPROM_CHECKSUM)
-			checksum += word;
-	}
-
-	/* Include all data from pointers 0x3, 0x6-0xE.  This excludes the
-	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
-	 */
-	for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
-		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
-			continue;
-
-		if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-
-		/* Skip pointer section if the pointer is invalid. */
-		if (pointer == 0xFFFF || pointer == 0 ||
-		    pointer >= hw->eeprom.word_size)
-			continue;
-
-		if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
-			DEBUGOUT("EEPROM read failed\n");
-			return IXGBE_ERR_EEPROM;
-		}
-
-		/* Skip pointer section if length is invalid. */
-		if (length == 0xFFFF || length == 0 ||
-		    (pointer + length) >= hw->eeprom.word_size)
-			continue;
-
-		for (j = pointer + 1; j <= pointer + length; j++) {
-			if (ixgbe_read_eerd_generic(hw, j, &word)) {
-				DEBUGOUT("EEPROM read failed\n");
-				return IXGBE_ERR_EEPROM;
-			}
-			checksum += word;
-		}
-	}
-
-	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
-	return (s32)checksum;
-}
-
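
The arithmetic behind the checksum above: every covered word (words 0x0-0x3E
plus the valid pointer sections) is summed with 16-bit wrap-around, and the
stored checksum is the base value minus that sum, so re-summing everything
including the checksum word lands back on the base. A standalone sketch of
that folding, where the 0xBABA base mirrors the driver's IXGBE_EEPROM_SUM and
the sample word values are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define EEPROM_SUM_BASE 0xBABAu

    static uint16_t calc_checksum(const uint16_t *words, int count)
    {
        uint16_t sum = 0;
        int i;

        for (i = 0; i < count; i++)
            sum += words[i];    /* 16-bit wrap-around is intended */
        return (uint16_t)(EEPROM_SUM_BASE - sum);
    }

    int main(void)
    {
        uint16_t covered[] = { 0x1234, 0xABCD, 0x00FF };  /* sample data */
        uint16_t csum = calc_checksum(covered, 3);
        uint16_t verify = covered[0] + covered[1] + covered[2] + csum;

        /* verify comes out as 0xBABA when the checksum is consistent */
        printf("checksum=0x%04X verify=0x%04X\n",
               (unsigned)csum, (unsigned)verify);
        return 0;
    }
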
-/**
- *  ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
- *  @hw: pointer to hardware structure
- *  @checksum_val: calculated checksum
- *
- *  Performs checksum calculation and validates the EEPROM checksum.  If the
- *  caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
-					u16 *checksum_val)
-{
-	s32 status;
-	u16 checksum;
-	u16 read_checksum = 0;
-
-	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
-		return IXGBE_ERR_SWFW_SYNC;
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		goto out;
-
-	checksum = (u16)(status & 0xffff);
-
-	/* Do not use hw->eeprom.ops.read because we do not want to take
-	 * the synchronization semaphores twice here.
-	 */
-	status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
-					 &read_checksum);
-	if (status)
-		goto out;
-
-	/* Verify read checksum from EEPROM is the same as
-	 * calculated checksum
-	 */
-	if (read_checksum != checksum) {
-		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
-			     "Invalid EEPROM checksum");
-		status = IXGBE_ERR_EEPROM_CHECKSUM;
-	}
-
-	/* If the user cares, return the calculated checksum */
-	if (checksum_val)
-		*checksum_val = checksum;
-
-out:
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
-	return status;
-}
-
-/**
- * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
- * @hw: pointer to hardware structure
- *
- * After writing the EEPROM to shadow RAM using the EEWR register, software
- * calculates the checksum, updates the EEPROM, and instructs the hardware to
- * update the flash.
- **/
-s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u16 checksum;
-
-	DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
-		return IXGBE_ERR_SWFW_SYNC;
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		goto out;
-
-	checksum = (u16)(status & 0xffff);
-
-	/* Do not use hw->eeprom.ops.write because we do not want to
-	 * take the synchronization semaphores twice here.
-	 */
-	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
-	if (status)
-		goto out;
-
-	status = ixgbe_update_flash_X540(hw);
-
-out:
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
-	return status;
-}
-
-/**
- *  ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
- *  @hw: pointer to hardware structure
- *
- *  Sets FLUP (bit 23) of the EEC register to instruct the hardware to copy
- *  the EEPROM contents from shadow RAM to the flash device.
- **/
-s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
-{
-	u32 flup;
-	s32 status;
-
-	DEBUGFUNC("ixgbe_update_flash_X540");
-
-	status = ixgbe_poll_flash_update_done_X540(hw);
-	if (status == IXGBE_ERR_EEPROM) {
-		DEBUGOUT("Flash update time out\n");
-		goto out;
-	}
-
-	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
-	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
-
-	status = ixgbe_poll_flash_update_done_X540(hw);
-	if (status == IXGBE_SUCCESS)
-		DEBUGOUT("Flash update complete\n");
-	else
-		DEBUGOUT("Flash update time out\n");
-
-	if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
-		flup = IXGBE_READ_REG(hw, IXGBE_EEC);
-
-		if (flup & IXGBE_EEC_SEC1VAL) {
-			flup |= IXGBE_EEC_FLUP;
-			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
-		}
-
-		status = ixgbe_poll_flash_update_done_X540(hw);
-		if (status == IXGBE_SUCCESS)
-			DEBUGOUT("Flash update complete\n");
-		else
-			DEBUGOUT("Flash update time out\n");
-	}
-out:
-	return status;
-}
-
-/**
- *  ixgbe_poll_flash_update_done_X540 - Poll flash update status
- *  @hw: pointer to hardware structure
- *
- *  Polls the FLUDONE (bit 26) of the EEC Register to determine when the
- *  flash update is done.
- **/
-STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
-{
-	u32 i;
-	u32 reg;
-	s32 status = IXGBE_ERR_EEPROM;
-
-	DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
-
-	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
-		if (reg & IXGBE_EEC_FLUDONE) {
-			status = IXGBE_SUCCESS;
-			break;
-		}
-		msec_delay(5);
-	}
-
-	if (i == IXGBE_FLUDONE_ATTEMPTS)
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "Flash update status polling timed out");
-
-	return status;
-}
-
-/**
- * ixgbe_set_mux - Set mux for port 1 access with CS4227
- * @hw: pointer to hardware structure
- * @state: set mux if 1, clear if 0
- */
-STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
-{
-	u32 esdp;
-
-	if (!hw->bus.lan_id)
-		return;
-	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-	if (state)
-		esdp |= IXGBE_ESDP_SDP1;
-	else
-		esdp &= ~IXGBE_ESDP_SDP1;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- *  ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to acquire
- *
- *  Acquires the SWFW semaphore through the SW_FW_SYNC register for
- *  the specified function (CSR, PHY0, PHY1, NVM, Flash)
- **/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
-{
-	u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
-	u32 fwmask = swmask << 5;
-	u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
-	u32 timeout = 200;
-	u32 hwmask = 0;
-	u32 swfw_sync;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
-
-	if (swmask & IXGBE_GSSR_EEP_SM)
-		hwmask |= IXGBE_GSSR_FLASH_SM;
-
-	/* SW only mask doesn't have FW bit pair */
-	if (mask & IXGBE_GSSR_SW_MNG_SM)
-		swmask |= IXGBE_GSSR_SW_MNG_SM;
-
-	swmask |= swi2c_mask;
-	fwmask |= swi2c_mask << 2;
-	for (i = 0; i < timeout; i++) {
-		/* SW NVM semaphore bit is used for access to all
-		 * SW_FW_SYNC bits (not just NVM)
-		 */
-		if (ixgbe_get_swfw_sync_semaphore(hw))
-			return IXGBE_ERR_SWFW_SYNC;
-
-		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
-			swfw_sync |= swmask;
-			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
-			ixgbe_release_swfw_sync_semaphore(hw);
-			msec_delay(5);
-			if (swi2c_mask)
-				ixgbe_set_mux(hw, 1);
-			return IXGBE_SUCCESS;
-		}
-		/* Firmware currently using resource (fwmask), hardware
-		 * currently using resource (hwmask), or other software
-		 * thread currently using resource (swmask)
-		 */
-		ixgbe_release_swfw_sync_semaphore(hw);
-		msec_delay(5);
-	}
-
-	/* Failed to get SW only semaphore */
-	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "Failed to get SW only semaphore");
-		return IXGBE_ERR_SWFW_SYNC;
-	}
-
-	/* If the resource is not released by the FW/HW the SW can assume that
-	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
-	 * of the requested resource(s) while ignoring the corresponding FW/HW
-	 * bits in the SW_FW_SYNC register.
-	 */
-	if (ixgbe_get_swfw_sync_semaphore(hw))
-		return IXGBE_ERR_SWFW_SYNC;
-	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-	if (swfw_sync & (fwmask | hwmask)) {
-		swfw_sync |= swmask;
-		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
-		ixgbe_release_swfw_sync_semaphore(hw);
-		msec_delay(5);
-		if (swi2c_mask)
-			ixgbe_set_mux(hw, 1);
-		return IXGBE_SUCCESS;
-	}
-	/* If the resource is not released by other SW the SW can assume that
-	 * the other SW malfunctions. In that case the SW should clear all SW
-	 * flags that it does not own and then repeat the whole process once
-	 * again.
-	 */
-	if (swfw_sync & swmask) {
-		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
-			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
-
-		if (swi2c_mask)
-			rmask |= IXGBE_GSSR_I2C_MASK;
-		ixgbe_release_swfw_sync_X540(hw, rmask);
-		ixgbe_release_swfw_sync_semaphore(hw);
-		return IXGBE_ERR_SWFW_SYNC;
-	}
-	ixgbe_release_swfw_sync_semaphore(hw);
-
-	return IXGBE_ERR_SWFW_SYNC;
-}
-
-/**
- *  ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
- *  @hw: pointer to hardware structure
- *  @mask: Mask to specify which semaphore to release
- *
- *  Releases the SWFW semaphore through the SW_FW_SYNC register
- *  for the specified function (CSR, PHY0, PHY1, NVM, Flash)
- **/
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
-{
-	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
-	u32 swfw_sync;
-
-	DEBUGFUNC("ixgbe_release_swfw_sync_X540");
-
-	if (mask & IXGBE_GSSR_I2C_MASK) {
-		swmask |= mask & IXGBE_GSSR_I2C_MASK;
-		ixgbe_set_mux(hw, 0);
-	}
-	ixgbe_get_swfw_sync_semaphore(hw);
-
-	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-	swfw_sync &= ~swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
-
-	ixgbe_release_swfw_sync_semaphore(hw);
-	msec_delay(5);
-}
-
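
The intended usage pattern for the acquire/release pair above mirrors the
EERD/EEWR wrappers earlier in this file: take the EEPROM semaphore, perform
the raw access, then always release. A minimal caller sketch, assuming the
base-driver headers moved by this series (ixgbe_type.h, ixgbe_common.h,
ixgbe_x540.h); read_word_locked() itself is a hypothetical helper, not part
of the driver:

    #include "ixgbe_type.h"
    #include "ixgbe_common.h"
    #include "ixgbe_x540.h"

    /* Hypothetical helper: locked single-word EEPROM read. */
    static s32 read_word_locked(struct ixgbe_hw *hw, u16 offset, u16 *data)
    {
        s32 status;

        /* Take the SW/FW EEPROM semaphore before touching EERD */
        status = ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
        if (status != IXGBE_SUCCESS)
            return IXGBE_ERR_SWFW_SYNC;

        status = ixgbe_read_eerd_generic(hw, offset, data);

        /* Always release, even if the read failed */
        ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
        return status;
    }
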
-/**
- *  ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
- *  @hw: pointer to hardware structure
- *
- *  Sets the hardware semaphores so SW/FW can gain control of shared resources
- **/
-STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_ERR_EEPROM;
-	u32 timeout = 2000;
-	u32 i;
-	u32 swsm;
-
-	DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
-
-	/* Get SMBI software semaphore between device drivers first */
-	for (i = 0; i < timeout; i++) {
-		/*
-		 * If the SMBI bit is 0 when we read it, then the bit will be
-		 * set and we have the semaphore
-		 */
-		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-		if (!(swsm & IXGBE_SWSM_SMBI)) {
-			status = IXGBE_SUCCESS;
-			break;
-		}
-		usec_delay(50);
-	}
-
-	/* Now get the semaphore between SW/FW through the REGSMP bit */
-	if (status == IXGBE_SUCCESS) {
-		for (i = 0; i < timeout; i++) {
-			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-			if (!(swsm & IXGBE_SWFW_REGSMP))
-				break;
-
-			usec_delay(50);
-		}
-
-		/*
-		 * Release semaphores and return error if SW NVM semaphore
-		 * was not granted because we don't have access to the EEPROM
-		 */
-		if (i >= timeout) {
-			ERROR_REPORT1(IXGBE_ERROR_POLLING,
-				"REGSMP Software NVM semaphore not granted.\n");
-			ixgbe_release_swfw_sync_semaphore(hw);
-			status = IXGBE_ERR_EEPROM;
-		}
-	} else {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING,
-			     "Software semaphore SMBI between device drivers "
-			     "not granted.\n");
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
- *  @hw: pointer to hardware structure
- *
- *  This function clears hardware semaphore bits.
- **/
-STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
-{
-	u32 swsm;
-
-	DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
-
-	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
-
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-	swsm &= ~IXGBE_SWSM_SMBI;
-	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
-	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-	swsm &= ~IXGBE_SWFW_REGSMP;
-	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
-
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/**
- * ixgbe_blink_led_start_X540 - Blink LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to blink
- *
- * Devices that implement the version 2 interface:
- *   X540
- **/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
-{
-	u32 macc_reg;
-	u32 ledctl_reg;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	DEBUGFUNC("ixgbe_blink_led_start_X540");
-
-	/*
-	 * Link should be up in order for the blink bit in the LED control
-	 * register to work. Force link and speed in the MAC if link is down.
-	 * This will be reversed when we stop the blinking.
-	 */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (link_up == false) {
-		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
-		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
-		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-	}
-	/* Set the LED to LINK_UP + BLINK. */
-	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
-	ledctl_reg |= IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
- * @hw: pointer to hardware structure
- * @index: led number to stop blinking
- *
- * Devices that implement the version 2 interface:
- *   X540
- **/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
-{
-	u32 macc_reg;
-	u32 ledctl_reg;
-
-	DEBUGFUNC("ixgbe_blink_led_stop_X540");
-
-	/* Restore the LED to its default value. */
-	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
-	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
-	ledctl_reg &= ~IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
-
-	/* Unforce link and speed in the MAC. */
-	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
-	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
-	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
deleted file mode 100644
index bca1dff..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
+++ /dev/null
@@ -1,66 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_X540_H_
-#define _IXGBE_X540_H_
-
-#include "ixgbe_type.h"
-
-s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
-				     ixgbe_link_speed *speed, bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			      bool link_up_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
-s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
-				u16 *data);
-s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
-				 u16 *data);
-s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
-s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
-s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
-void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
-
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-#endif /* _IXGBE_X540_H_ */
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
deleted file mode 100644
index 9572697..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.c
+++ /dev/null
@@ -1,2113 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#include "ixgbe_x550.h"
-#include "ixgbe_x540.h"
-#include "ixgbe_type.h"
-#include "ixgbe_api.h"
-#include "ixgbe_common.h"
-#include "ixgbe_phy.h"
-
-/**
- *  ixgbe_init_ops_X550 - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers and assign the MAC type for X550.
- *  Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_init_ops_X550");
-
-	ret_val = ixgbe_init_ops_X540(hw);
-	mac->ops.dmac_config = ixgbe_dmac_config_X550;
-	mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
-	mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
-	mac->ops.setup_eee = ixgbe_setup_eee_X550;
-	mac->ops.set_source_address_pruning =
-			ixgbe_set_source_address_pruning_X550;
-	mac->ops.set_ethertype_anti_spoofing =
-			ixgbe_set_ethertype_anti_spoofing_X550;
-
-	mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
-	eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
-	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
-	eeprom->ops.read = ixgbe_read_ee_hostif_X550;
-	eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
-	eeprom->ops.write = ixgbe_write_ee_hostif_X550;
-	eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
-	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
-	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
-
-	mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
-	mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
-	mac->ops.mdd_event = ixgbe_mdd_event_X550;
-	mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
-	mac->ops.disable_rx = ixgbe_disable_rx_x550;
-	return ret_val;
-}
-
-/**
- * ixgbe_identify_phy_x550em - Get PHY type based on device id
- * @hw: pointer to hardware structure
- *
- * Returns error code
- */
-STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
-{
-	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X550EM_X_SFP:
-		/* set up for CS4227 usage */
-		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
-		if (hw->bus.lan_id) {
-
-			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
-			esdp |= IXGBE_ESDP_SDP1_DIR;
-		}
-		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-
-		return ixgbe_identify_module_generic(hw);
-		break;
-	case IXGBE_DEV_ID_X550EM_X_KX4:
-		hw->phy.type = ixgbe_phy_x550em_kx4;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_KR:
-		hw->phy.type = ixgbe_phy_x550em_kr;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_1G_T:
-	case IXGBE_DEV_ID_X550EM_X_10G_T:
-		return ixgbe_identify_phy_generic(hw);
-	default:
-		break;
-	}
-	return IXGBE_SUCCESS;
-}
-
-STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
-				     u32 device_type, u16 *phy_data)
-{
-	UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
-	return IXGBE_NOT_IMPLEMENTED;
-}
-
-STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
-				      u32 device_type, u16 phy_data)
-{
-	UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
-	return IXGBE_NOT_IMPLEMENTED;
-}
-
-/**
- *  ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
- *  @hw: pointer to hardware structure
- *
- *  Initialize the function pointers for MAC type X550EM.
- *  Does not touch the hardware.
- **/
-s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_init_ops_X550EM");
-
-	/* Similar to X550 so start there. */
-	ret_val = ixgbe_init_ops_X550(hw);
-
-	/* Since this function eventually calls
-	 * ixgbe_init_ops_X540 by design, we are setting
-	 * the pointers to NULL explicitly here to overwrite
-	 * the values being set in the x540 function.
-	 */
-	/* Thermal sensor not supported in x550EM */
-	mac->ops.get_thermal_sensor_data = NULL;
-	mac->ops.init_thermal_sensor_thresh = NULL;
-	mac->thermal_sensor_enabled = false;
-
-	/* FCOE not supported in x550EM */
-	mac->ops.get_san_mac_addr = NULL;
-	mac->ops.set_san_mac_addr = NULL;
-	mac->ops.get_wwn_prefix = NULL;
-	mac->ops.get_fcoe_boot_status = NULL;
-
-	/* IPsec not supported in x550EM */
-	mac->ops.disable_sec_rx_path = NULL;
-	mac->ops.enable_sec_rx_path = NULL;
-
-	/* X550EM bus type is internal*/
-	hw->bus.type = ixgbe_bus_type_internal;
-	mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
-
-	mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
-	mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
-	mac->ops.get_media_type = ixgbe_get_media_type_X550em;
-	mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
-	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
-	mac->ops.reset_hw = ixgbe_reset_hw_X550em;
-	mac->ops.get_supported_physical_layer =
-				    ixgbe_get_supported_physical_layer_X550em;
-
-	/* PHY */
-	phy->ops.init = ixgbe_init_phy_ops_X550em;
-	phy->ops.identify = ixgbe_identify_phy_x550em;
-	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
-		phy->ops.set_phy_power = NULL;
-
-
-	/* EEPROM */
-	eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
-	eeprom->ops.read = ixgbe_read_ee_hostif_X550;
-	eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
-	eeprom->ops.write = ixgbe_write_ee_hostif_X550;
-	eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
-	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
-	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
-	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_dmac_config_X550
- *  @hw: pointer to hardware structure
- *
- *  Configure DMA coalescing. When enabling, DMA coalescing is activated;
- *  when disabling, the DMA coalescing enable bit is cleared.
- **/
-s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
-{
-	u32 reg, high_pri_tc;
-
-	DEBUGFUNC("ixgbe_dmac_config_X550");
-
-	/* Disable DMA coalescing before configuring */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
-	reg &= ~IXGBE_DMACR_DMAC_EN;
-	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
-	/* Disable DMA Coalescing if the watchdog timer is 0 */
-	if (!hw->mac.dmac_config.watchdog_timer)
-		goto out;
-
-	ixgbe_dmac_config_tcs_X550(hw);
-
-	/* Configure DMA Coalescing Control Register */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
-
-	/* Set the watchdog timer in units of 40.96 usec */
-	reg &= ~IXGBE_DMACR_DMACWT_MASK;
-	reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
-
-	reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
-	/* If fcoe is enabled, set high priority traffic class */
-	if (hw->mac.dmac_config.fcoe_en) {
-		high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
-		reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
-			IXGBE_DMACR_HIGH_PRI_TC_MASK);
-	}
-	reg |= IXGBE_DMACR_EN_MNG_IND;
-
-	/* Enable DMA coalescing after configuration */
-	reg |= IXGBE_DMACR_DMAC_EN;
-	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
-out:
-	return IXGBE_SUCCESS;
-}
-
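
The watchdog value written above is converted with integer math only: the
DMACWT field counts 40.96 usec ticks, and usec * 100 / 4096 equals
usec / 40.96. A standalone sketch of that conversion; treating
watchdog_timer as microseconds is inferred from the 40.96 usec comment, not
stated explicitly in the code:

    #include <stdio.h>

    int main(void)
    {
        unsigned int watchdog_usec = 1000;               /* illustrative input */
        unsigned int ticks = (watchdog_usec * 100) / 4096;

        /* 1000 usec -> 24 ticks, i.e. ~983 usec effective */
        printf("%u usec -> %u ticks (~%u usec)\n",
               watchdog_usec, ticks, ticks * 4096 / 100);
        return 0;
    }
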
-/**
- *  ixgbe_dmac_config_tcs_X550
- *  @hw: pointer to hardware structure
- *
- *  Configure DMA coalescing threshold per TC. The dmac enable bit must
- *  be cleared before configuring.
- **/
-s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
-{
-	u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
-
-	DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
-
-	/* Select Rx packet buffer headroom based on link speed */
-	switch (hw->mac.dmac_config.link_speed) {
-	case IXGBE_LINK_SPEED_100_FULL:
-		pb_headroom = IXGBE_DMACRXT_100M;
-		break;
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		pb_headroom = IXGBE_DMACRXT_1G;
-		break;
-	default:
-		pb_headroom = IXGBE_DMACRXT_10G;
-		break;
-	}
-
-	maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
-			     IXGBE_MHADD_MFS_SHIFT) / 1024);
-
-	/* Set the per Rx packet buffer receive threshold */
-	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
-		reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
-
-		if (tc < hw->mac.dmac_config.num_tcs) {
-			/* Get Rx PB size */
-			rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
-			rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
-				IXGBE_RXPBSIZE_SHIFT;
-
-			/* Calculate receive buffer threshold in kilobytes */
-			if (rx_pb_size > pb_headroom)
-				rx_pb_size = rx_pb_size - pb_headroom;
-			else
-				rx_pb_size = 0;
-
-			/* DMCTH must be at least the max frame size (MFS) */
-			reg |= (rx_pb_size > maxframe_size_kb) ?
-				rx_pb_size : maxframe_size_kb;
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
-	}
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_dmac_update_tcs_X550
- *  @hw: pointer to hardware structure
- *
- *  Disables dmac, updates per TC settings, and then enables dmac.
- **/
-s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
-{
-	u32 reg;
-
-	DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
-
-	/* Disable DMA coalescing before configuring */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
-	reg &= ~IXGBE_DMACR_DMAC_EN;
-	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
-	ixgbe_dmac_config_tcs_X550(hw);
-
-	/* Enable DMA coalescing after configuration */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
-	reg |= IXGBE_DMACR_DMAC_EN;
-	IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
- *  @hw: pointer to hardware structure
- *
- *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
- *  ixgbe_hw struct in order to set up EEPROM access.
- **/
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
-{
-	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-	u32 eec;
-	u16 eeprom_size;
-
-	DEBUGFUNC("ixgbe_init_eeprom_params_X550");
-
-	if (eeprom->type == ixgbe_eeprom_uninitialized) {
-		eeprom->semaphore_delay = 10;
-		eeprom->type = ixgbe_flash;
-
-		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
-				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
-
-		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
-			  eeprom->type, eeprom->word_size);
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_setup_eee_X550 - Enable/disable EEE support
- *  @hw: pointer to the HW structure
- *  @enable_eee: boolean flag to enable EEE
- *
- *  Enable/disable EEE based on enable_eee flag.
- *  Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- *  are modified.
- *
- **/
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
-{
-	u32 eeer;
-	u16 autoneg_eee_reg;
-	u32 link_reg;
-	s32 status;
-
-	DEBUGFUNC("ixgbe_setup_eee_X550");
-
-	eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
-	/* Enable or disable EEE per flag */
-	if (enable_eee) {
-		eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
-		if (hw->device_id == IXGBE_DEV_ID_X550T) {
-			/* Advertise EEE capability */
-			hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
-			autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
-				IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
-				IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
-			hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
-		} else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
-			status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
-			if (status != IXGBE_SUCCESS)
-				return status;
-
-			link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
-				    IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
-			status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
-			if (status != IXGBE_SUCCESS)
-				return status;
-		}
-	} else {
-		eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
-		if (hw->device_id == IXGBE_DEV_ID_X550T) {
-			/* Disable advertised EEE capability */
-			hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
-			autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
-				IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
-				IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
-			hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
-				IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
-		} else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
-			status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
-			if (status != IXGBE_SUCCESS)
-				return status;
-
-			link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
-				IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
-			status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
-			if (status != IXGBE_SUCCESS)
-				return status;
-		}
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
- * @hw: pointer to hardware structure
- * @enable: enable or disable source address pruning
- * @pool: Rx pool to set source address pruning for
- **/
-void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
-					   unsigned int pool)
-{
-	u64 pfflp;
-
-	/* max rx pool is 63 */
-	if (pool > 63)
-		return;
-
-	pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
-	pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
-
-	if (enable)
-		pfflp |= (1ULL << pool);
-	else
-		pfflp &= ~(1ULL << pool);
-
-	IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
-	IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
-}
-
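
The pruning state above is one logical 64-bit bitmap (one bit per Rx pool,
0-63) that the hardware exposes as two 32-bit registers. A standalone sketch
of the combine / set-bit / split handling, with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lo = 0x00000001, hi = 0x80000000;  /* pools 0 and 63 enabled */
        uint64_t map = ((uint64_t)hi << 32) | lo;
        unsigned int pool = 40;

        map |= 1ULL << pool;                        /* enable pool 40 */
        printf("low=0x%08X high=0x%08X\n",
               (unsigned)(uint32_t)map, (unsigned)(map >> 32));
        return 0;
    }
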
-/**
- *  ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
- *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for Ethertype anti-spoofing
- *  @vf: Virtual Function pool to set for Ethertype anti-spoofing
- *
- **/
-void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
-		bool enable, int vf)
-{
-	int vf_target_reg = vf >> 3;
-	int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
-	u32 pfvfspoof;
-
-	DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
-
-	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
-	else
-		pfvfspoof &= ~(1 << vf_target_shift);
-
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
-}
-
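
The indexing above packs eight VFs per 32-bit PFVFSPOOF register: the register
index is vf / 8 and the ethertype anti-spoof bit is (vf % 8) plus a
field-specific shift. A standalone sketch with an illustrative shift value
(not the real IXGBE_SPOOF_ETHERTYPEAS_SHIFT):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int vf = 21;
        unsigned int field_shift = 8;               /* illustrative */
        unsigned int reg_index = vf >> 3;           /* -> register 2 */
        unsigned int bit = vf % 8 + field_shift;    /* -> bit 13 */
        uint32_t reg = 0;

        reg |= 1u << bit;                           /* enable for VF 21 */
        printf("VF %u -> PFVFSPOOF[%u] bit %u, reg=0x%08X\n",
               vf, reg_index, bit, (unsigned)reg);
        return 0;
    }
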
-/**
- *  ixgbe_write_iosf_sb_reg_x550 - Writes a value to the specified register of
- *  the IOSF device
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to write
- *  @device_type: 3 bit device type
- *  @data: Data to write to the register
- **/
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-			    u32 device_type, u32 data)
-{
-	u32 i, command, error;
-
-	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
-		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
-
-	/* Write IOSF control register */
-	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
-
-	/* Write IOSF data register */
-	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
-	/*
-	 * Check every 10 usec to see if the address cycle completed.
-	 * The SB IOSF BUSY bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-			break;
-	}
-
-	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
-		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
-			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
-		ERROR_REPORT2(IXGBE_ERROR_POLLING,
-			      "Failed to write, error %x\n", error);
-		return IXGBE_ERR_PHY;
-	}
-
-	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of
- *  the IOSF device
- *  @hw: pointer to hardware structure
- *  @reg_addr: 32 bit PHY register to read
- *  @device_type: 3 bit device type
- *  @data: pointer used to return the value read from the register
- **/
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-			   u32 device_type, u32 *data)
-{
-	u32 i, command, error;
-
-	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
-		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
-
-	/* Write IOSF control register */
-	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
-
-	/*
-	 * Check every 10 usec to see if the address cycle completed.
-	 * The SB IOSF BUSY bit will clear when the operation is
-	 * complete
-	 */
-	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-		usec_delay(10);
-
-		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-		if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-			break;
-	}
-
-	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
-		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
-			 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
-		ERROR_REPORT2(IXGBE_ERROR_POLLING,
-				"Failed to read, error %x\n", error);
-		return IXGBE_ERR_PHY;
-	}
-
-	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-		ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
-		return IXGBE_ERR_PHY;
-	}
-
-	*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_disable_mdd_X550
- *  @hw: pointer to hardware structure
- *
- *  Disable malicious driver detection
- **/
-void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
-{
-	u32 reg;
-
-	DEBUGFUNC("ixgbe_disable_mdd_X550");
-
-	/* Disable MDD for TX DMA and interrupt */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-	reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
-	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
-
-	/* Disable MDD for RX and interrupt */
-	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-	reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
-	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
-}
-
-/**
- *  ixgbe_enable_mdd_X550
- *  @hw: pointer to hardware structure
- *
- *  Enable malicious driver detection
- **/
-void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
-{
-	u32 reg;
-
-	DEBUGFUNC("ixgbe_enable_mdd_X550");
-
-	/* Enable MDD for TX DMA and interrupt */
-	reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-	reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
-	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
-
-	/* Enable MDD for RX and interrupt */
-	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-	reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
-	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
-}
-
-/**
- *  ixgbe_restore_mdd_vf_X550
- *  @hw: pointer to hardware structure
- *  @vf: vf index
- *
- *  Restore VF that was disabled during malicious driver detection event
- **/
-void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
-{
-	u32 idx, reg, num_qs, start_q, bitmask;
-
-	DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
-
-	/* Map VF to queues */
-	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	switch (reg & IXGBE_MRQC_MRQE_MASK) {
-	case IXGBE_MRQC_VMDQRT8TCEN:
-		num_qs = 8;  /* 16 VFs / pools */
-		bitmask = 0x000000FF;
-		break;
-	case IXGBE_MRQC_VMDQRSS32EN:
-	case IXGBE_MRQC_VMDQRT4TCEN:
-		num_qs = 4;  /* 32 VFs / pools */
-		bitmask = 0x0000000F;
-		break;
-	default:            /* 64 VFs / pools */
-		num_qs = 2;
-		bitmask = 0x00000003;
-		break;
-	}
-	start_q = vf * num_qs;
-
-	/* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
-	idx = start_q / 32;
-	reg = 0;
-	reg |= (bitmask << (start_q % 32));
-	IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
-	IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
-}
-
-/**
- *  ixgbe_mdd_event_X550
- *  @hw: pointer to hardware structure
- *  @vf_bitmap: vf bitmap of malicious vfs
- *
- *  Handle malicious driver detection event.
- **/
-void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
-{
-	u32 wqbr;
-	u32 i, j, reg, q, shift, vf, idx;
-
-	DEBUGFUNC("ixgbe_mdd_event_X550");
-
-	/* figure out pool size for mapping to vf's */
-	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	switch (reg & IXGBE_MRQC_MRQE_MASK) {
-	case IXGBE_MRQC_VMDQRT8TCEN:
-		shift = 3;  /* 16 VFs / pools */
-		break;
-	case IXGBE_MRQC_VMDQRSS32EN:
-	case IXGBE_MRQC_VMDQRT4TCEN:
-		shift = 2;  /* 32 VFs / pools */
-		break;
-	default:
-		shift = 1;  /* 64 VFs / pools */
-		break;
-	}
-
-	/* Read WQBR_TX and WQBR_RX and check for malicious queues */
-	for (i = 0; i < 4; i++) {
-		wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
-		wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
-
-		if (!wqbr)
-			continue;
-
-		/* Get malicious queue */
-		for (j = 0; j < 32 && wqbr; j++) {
-
-			if (!(wqbr & (1 << j)))
-				continue;
-
-			/* Get queue from bitmask */
-			q = j + (i * 32);
-
-			/* Map queue to vf */
-			vf = (q >> shift);
-
-			/* Set vf bit in vf_bitmap */
-			idx = vf / 32;
-			vf_bitmap[idx] |= (1 << (vf % 32));
-			wqbr &= ~(1 << j);
-		}
-	}
-}
-
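
Both MDD helpers above rely on the same queue-to-VF mapping: with 2, 4 or 8
queues per pool the VF index is the queue index shifted right by
log2(queues per pool), and each flagged VF sets one bit in a 32-bit bitmap
word. A standalone sketch with an illustrative pool mode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vf_bitmap[2] = { 0, 0 };   /* covers 64 VFs */
        unsigned int shift = 2;             /* 4 queues per pool (32 pools) */
        unsigned int q = 45;                /* offending queue from WQBR */
        unsigned int vf = q >> shift;       /* -> VF 11 */

        vf_bitmap[vf / 32] |= 1u << (vf % 32);
        printf("queue %u -> VF %u, bitmap[0]=0x%08X\n",
               q, vf, (unsigned)vf_bitmap[0]);
        return 0;
    }
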
-/**
- *  ixgbe_get_media_type_X550em - Get media type
- *  @hw: pointer to hardware structure
- *
- *  Returns the media type (fiber, copper, backplane)
- */
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
-{
-	enum ixgbe_media_type media_type;
-
-	DEBUGFUNC("ixgbe_get_media_type_X550em");
-
-	/* Detect if there is a copper PHY attached. */
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X550EM_X_KR:
-	case IXGBE_DEV_ID_X550EM_X_KX4:
-		media_type = ixgbe_media_type_backplane;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_SFP:
-		media_type = ixgbe_media_type_fiber;
-		break;
-	case IXGBE_DEV_ID_X550EM_X_1G_T:
-	case IXGBE_DEV_ID_X550EM_X_10G_T:
-		media_type = ixgbe_media_type_copper;
-		break;
-	default:
-		media_type = ixgbe_media_type_unknown;
-		break;
-	}
-	return media_type;
-}
-
-/**
- *  ixgbe_setup_sfp_modules_X550em - Setup SFP module
- *  @hw: pointer to hardware structure
- */
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
-{
-	bool setup_linear;
-	u16 reg_slice, edc_mode;
-	s32 ret_val;
-
-	DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
-
-	switch (hw->phy.sfp_type) {
-	case ixgbe_sfp_type_unknown:
-		return IXGBE_SUCCESS;
-	case ixgbe_sfp_type_not_present:
-		return IXGBE_ERR_SFP_NOT_PRESENT;
-	case ixgbe_sfp_type_da_cu_core0:
-	case ixgbe_sfp_type_da_cu_core1:
-		setup_linear = true;
-		break;
-	case ixgbe_sfp_type_srlr_core0:
-	case ixgbe_sfp_type_srlr_core1:
-	case ixgbe_sfp_type_da_act_lmt_core0:
-	case ixgbe_sfp_type_da_act_lmt_core1:
-	case ixgbe_sfp_type_1g_sx_core0:
-	case ixgbe_sfp_type_1g_sx_core1:
-	case ixgbe_sfp_type_1g_lx_core0:
-	case ixgbe_sfp_type_1g_lx_core1:
-		setup_linear = false;
-		break;
-	default:
-		return IXGBE_ERR_SFP_NOT_SUPPORTED;
-	}
-
-	ixgbe_init_mac_link_ops_X550em(hw);
-	hw->phy.ops.reset = NULL;
-
-	/* The CS4227 slice address is the base address + the port-pair reg
-	 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
-	 */
-	reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
-
-	if (setup_linear)
-		edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
-	else
-		edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
-	/* Configure CS4227 for connection type. */
-	ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
-					   edc_mode);
-
-	if (ret_val != IXGBE_SUCCESS)
-		ret_val = ixgbe_write_i2c_combined(hw, 0x80, reg_slice,
-						   edc_mode);
-
-	return ret_val;
-}
-
-/**
- *  ixgbe_init_mac_link_ops_X550em - init mac link function pointers
- *  @hw: pointer to hardware structure
- */
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
-{
-	struct ixgbe_mac_info *mac = &hw->mac;
-
-	DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
-
-	/* CS4227 does not support autoneg, so disable the laser control
-	 * functions for SFP+ fiber
-	 */
-	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
-		mac->ops.disable_tx_laser = NULL;
-		mac->ops.enable_tx_laser = NULL;
-		mac->ops.flap_tx_laser = NULL;
-	}
-}
-
-/**
- *  ixgbe_get_link_capabilities_X550em - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: true when autoneg or autotry is enabled
- */
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
-				       ixgbe_link_speed *speed,
-				       bool *autoneg)
-{
-	DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
-
-	/* SFP */
-	if (hw->phy.media_type == ixgbe_media_type_fiber) {
-
-		/* CS4227 SFP must not enable auto-negotiation */
-		*autoneg = false;
-
-		/* Check if 1G SFP module. */
-		if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
-		    || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
-			*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			return IXGBE_SUCCESS;
-		}
-
-		/* Link capabilities are based on SFP */
-		if (hw->phy.multispeed_fiber)
-			*speed = IXGBE_LINK_SPEED_10GB_FULL |
-				 IXGBE_LINK_SPEED_1GB_FULL;
-		else
-			*speed = IXGBE_LINK_SPEED_10GB_FULL;
-	} else {
-		*speed = IXGBE_LINK_SPEED_10GB_FULL |
-			 IXGBE_LINK_SPEED_1GB_FULL;
-		*autoneg = true;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_init_phy_ops_X550em - PHY/SFP specific init
- *  @hw: pointer to hardware structure
- *
- *  Initialize any function pointers that were not able to be
- *  set during init_shared_code because the PHY/SFP type was
- *  not known.  Perform the SFP init if necessary.
- */
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
-{
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val;
-	u32 esdp;
-
-	DEBUGFUNC("ixgbe_init_phy_ops_X550em");
-
-	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
-
-		if (hw->bus.lan_id) {
-			esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
-			esdp |= IXGBE_ESDP_SDP1_DIR;
-		}
-		esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	}
-
-	/* Identify the PHY or SFP module */
-	ret_val = phy->ops.identify(hw);
-	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		return ret_val;
-
-	/* Setup function pointers based on detected SFP module and speeds */
-	ixgbe_init_mac_link_ops_X550em(hw);
-	if (phy->sfp_type != ixgbe_sfp_type_unknown)
-		phy->ops.reset = NULL;
-
-	/* Set functions pointers based on phy type */
-	switch (hw->phy.type) {
-	case ixgbe_phy_x550em_kx4:
-		phy->ops.setup_link = ixgbe_setup_kx4_x550em;
-		phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
-		phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
-		break;
-	case ixgbe_phy_x550em_kr:
-		phy->ops.setup_link = ixgbe_setup_kr_x550em;
-		phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
-		phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
-		break;
-	case ixgbe_phy_x550em_ext_t:
-		phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
-		break;
-	default:
-		break;
-	}
-	return ret_val;
-}
-
-/**
- *  ixgbe_reset_hw_X550em - Perform hardware reset
- *  @hw: pointer to hardware structure
- *
- *  Resets the hardware by resetting the transmit and receive units, masks
- *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
- *  reset.
- */
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
-{
-	ixgbe_link_speed link_speed;
-	s32 status;
-	u32 ctrl = 0;
-	u32 i;
-	bool link_up = false;
-
-	DEBUGFUNC("ixgbe_reset_hw_X550em");
-
-	/* Call adapter stop to disable Tx/Rx and clear interrupts */
-	status = hw->mac.ops.stop_adapter(hw);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* flush pending Tx transactions */
-	ixgbe_clear_tx_pending(hw);
-
-	/* PHY ops must be identified and initialized prior to reset */
-
-	/* Identify PHY and related function pointers */
-	status = hw->phy.ops.init(hw);
-
-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		return status;
-
-	/* start the external PHY */
-	if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
-		status = ixgbe_init_ext_t_x550em(hw);
-		if (status)
-			return status;
-	}
-
-	/* Setup SFP module if there is one present. */
-	if (hw->phy.sfp_setup_needed) {
-		status = hw->mac.ops.setup_sfp(hw);
-		hw->phy.sfp_setup_needed = false;
-	}
-
-	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		return status;
-
-	/* Reset PHY */
-	if (!hw->phy.reset_disable && hw->phy.ops.reset)
-		hw->phy.ops.reset(hw);
-
-mac_reset_top:
-	/* Issue global reset to the MAC.  Needs to be SW reset if link is up.
-	 * If link reset is used when link is up, it might reset the PHY when
-	 * mng is using it.  If link is down or the flag to force full link
-	 * reset is set, then perform link reset.
-	 */
-	ctrl = IXGBE_CTRL_LNK_RST;
-	if (!hw->force_full_reset) {
-		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
-		if (link_up)
-			ctrl = IXGBE_CTRL_RST;
-	}
-
-	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Poll for reset bit to self-clear meaning reset is complete */
-	for (i = 0; i < 10; i++) {
-		usec_delay(1);
-		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST_MASK))
-			break;
-	}
-
-	if (ctrl & IXGBE_CTRL_RST_MASK) {
-		status = IXGBE_ERR_RESET_FAILED;
-		DEBUGOUT("Reset polling failed to complete.\n");
-	}
-
-	msec_delay(50);
-
-	/* Double resets are required for recovery from certain error
-	 * conditions.  Between resets, it is necessary to stall to
-	 * allow time for any pending HW events to complete.
-	 */
-	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
-		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-		goto mac_reset_top;
-	}
-
-	/* Store the permanent mac address */
-	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
-	/* Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table.  Also reset num_rar_entries to 128,
-	 * since we modify this value when programming the SAN MAC address.
-	 */
-	hw->mac.num_rar_entries = 128;
-	hw->mac.ops.init_rx_addrs(hw);
-
-
-	return status;
-}
-
-/**
- * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
- * @hw: pointer to hardware structure
- */
-s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
-{
-	u32 status;
-	u16 reg;
-	u32 retries = 1;
-
-	/* TODO: The number of attempts and delay between attempts is undefined */
-	do {
-		/* decrement retries counter and exit if we hit 0 */
-		if (retries < 1) {
-			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
-				      "External PHY not yet finished resetting.");
-			return IXGBE_ERR_PHY;
-		}
-		retries--;
-
-		usec_delay(0);
-
-		status = hw->phy.ops.read_reg(hw,
-					      IXGBE_MDIO_TX_VENDOR_ALARMS_3,
-					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					      &reg);
-
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		/* Verify PHY FW reset has completed */
-	} while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
-	/* Set port to low power mode */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
-
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				       reg);
-
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Enable the transmitter */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				      &reg);
-
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
-
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
-				       IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				       reg);
-
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Un-stall the PHY FW */
-	status = hw->phy.ops.read_reg(hw,
-				      IXGBE_MDIO_GLOBAL_RES_PR_10,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				      &reg);
-
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	reg &= ~IXGBE_MDIO_POWER_UP_STALL;
-
-	status = hw->phy.ops.write_reg(hw,
-				       IXGBE_MDIO_GLOBAL_RES_PR_10,
-				       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-				       reg);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_kr_x550em - Configure the KR PHY.
- *  @hw: pointer to hardware structure
- *
- *  Configures the integrated KR PHY.
- **/
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u32 reg_val;
-
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status)
-		return status;
-
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
-		     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
-
-	/* Advertise 10G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
-
-	/* Advertise 1G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
-
-	/* Restart auto-negotiation. */
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
- *  @hw: pointer to hardware structure
- *
- *  Configures the integrated KX4 PHY.
- **/
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u32 reg_val;
-
-	status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
-		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, &reg_val);
-	if (status)
-		return status;
-
-	reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
-			IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
-
-	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
-
-	/* Advertise 10G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
-
-	/* Advertise 1G support. */
-	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
-
-	/* Restart auto-negotiation. */
-	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
-		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, reg_val);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
- *  @hw: pointer to hardware structure
- *  @speed: the link speed to force
- *
- *  Configures the integrated KR PHY to use iXFI mode. Used to connect an
- *  internal and external PHY at a specific speed, without autonegotiation.
- **/
-STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
-{
-	s32 status;
-	u32 reg_val;
-
-	/* Disable AN and force speed to 10G Serial. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
-
-	/* Select forced link speed for internal PHY. */
-	switch (*speed) {
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
-		break;
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
-		break;
-	default:
-		/* Other link speeds are not supported by internal KR PHY. */
-		return IXGBE_ERR_LINK_SETUP;
-	}
-
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Disable training protocol FSM. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Disable Flex from training TXFFE. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
-	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Enable override for coefficients. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
-	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-				IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
-				IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Toggle port SW reset by AN reset. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
-	return status;
-}
-
-/**
- * ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY to talk to the external PHY. The base
- * driver will call this function when it gets notification via interrupt from
- * the external PHY. This function forces the internal PHY into iXFI mode at
- * the correct speed.
- *
- * A return of a non-zero value indicates an error, and the base driver should
- * not report link up.
- */
-s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
-{
-	u32 status;
-	u16 lasi, autoneg_status, speed;
-	ixgbe_link_speed force_speed;
-
-	/* Verify that the external link status has changed */
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				      &lasi);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* If there was no change in link status, we can just exit */
-	if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
-		return IXGBE_SUCCESS;
-
-	/* we read this twice back to back to indicate current status */
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      &autoneg_status);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      &autoneg_status);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* If link is not up return an error indicating treat link as down */
-	if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
-
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
-				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-				      &speed);
-
-	/* clear everything but the speed and duplex bits */
-	speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
-
-	switch (speed) {
-	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
-		force_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
-		force_speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	default:
-		/* Internal PHY does not support anything else */
-		return IXGBE_ERR_INVALID_LINK_SETTINGS;
-	}
-
-	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
-}
-
-/**
- *  ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
- *  @hw: pointer to hardware structure
- *
- *  Configures the integrated KR PHY to use internal loopback mode.
- **/
-s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u32 reg_val;
-
-	/* Disable AN and force speed to 10G Serial. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
-	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Set near-end loopback clocks. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
-	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Set loopback enable. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	/* Training bypass. */
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
-	if (status != IXGBE_SUCCESS)
-		return status;
-	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
-		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
-		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
-	return status;
-}
-
-/**
- *  ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
- *  command, assuming that the semaphore is already obtained.
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-				   u16 *data)
-{
-	s32 status;
-	struct ixgbe_hic_read_shadow_ram buffer;
-
-	DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
-	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
-	buffer.hdr.req.buf_lenh = 0;
-	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
-	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	/* convert offset from words to bytes */
-	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
-	/* one word */
-	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
-
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
-					      IXGBE_HI_COMMAND_TIMEOUT, false);
-
-	if (status)
-		return status;
-
-	*data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-					  FW_NVM_DATA_OFFSET);
-
-	return 0;
-}
-
-/**
- *  ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
-			      u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_read_ee_hostif_X550");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to read
- *  @words: number of words
- *  @data: word(s) read from the EEPROM
- *
- *  Reads 16 bit word(s) from the EEPROM using the hostif.
- **/
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				     u16 offset, u16 words, u16 *data)
-{
-	struct ixgbe_hic_read_shadow_ram buffer;
-	u32 current_word = 0;
-	u16 words_to_read;
-	s32 status;
-	u32 i;
-
-	DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
-
-	/* Take semaphore for the entire operation. */
-	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	if (status) {
-		DEBUGOUT("EEPROM read buffer - semaphore failed\n");
-		return status;
-	}
-	while (words) {
-		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
-			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
-		else
-			words_to_read = words;
-
-		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
-		buffer.hdr.req.buf_lenh = 0;
-		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
-		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-		/* convert offset from words to bytes */
-		buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
-		buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
-
-		status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-						      sizeof(buffer),
-						      IXGBE_HI_COMMAND_TIMEOUT,
-						      false);
-
-		if (status) {
-			DEBUGOUT("Host interface command failed\n");
-			goto out;
-		}
-
-		for (i = 0; i < words_to_read; i++) {
-			u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
-				  2 * i;
-			u32 value = IXGBE_READ_REG(hw, reg);
-
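-			/* Each 32-bit register read carries two 16-bit EEPROM
-			 * words, so i is advanced a second time below to
-			 * consume the high half. */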
-			data[current_word] = (u16)(value & 0xffff);
-			current_word++;
-			i++;
-			if (i < words_to_read) {
-				value >>= 16;
-				data[current_word] = (u16)(value & 0xffff);
-				current_word++;
-			}
-		}
-		words -= words_to_read;
-	}
-
-out:
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	return status;
-}
-
-/**
- *  ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @data: word to write to the EEPROM
- *
- *  Writes a 16 bit word to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-				    u16 data)
-{
-	s32 status;
-	struct ixgbe_hic_write_shadow_ram buffer;
-
-	DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
-
-	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
-	buffer.hdr.req.buf_lenh = 0;
-	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
-	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	 /* one word */
-	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
-	buffer.data = data;
-	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
-
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
-					      IXGBE_HI_COMMAND_TIMEOUT, false);
-
-	return status;
-}
-
-/**
- *  ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @data: word to write to the EEPROM
- *
- *  Writes a 16 bit word to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
-			       u16 data)
-{
-	s32 status = IXGBE_SUCCESS;
-
-	DEBUGFUNC("ixgbe_write_ee_hostif_X550");
-
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS) {
-		status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
-		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	} else {
-		DEBUGOUT("write ee hostif failed to get semaphore");
-		status = IXGBE_ERR_SWFW_SYNC;
-	}
-
-	return status;
-}
-
-/**
- *  ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
- *  @hw: pointer to hardware structure
- *  @offset: offset of word in the EEPROM to write
- *  @words: number of words
- *  @data: word(s) to write to the EEPROM
- *
- *  Writes 16 bit word(s) to the EEPROM using the hostif.
- **/
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				      u16 offset, u16 words, u16 *data)
-{
-	s32 status = IXGBE_SUCCESS;
-	u32 i = 0;
-
-	DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
-
-	/* Take semaphore for the entire operation. */
-	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-	if (status != IXGBE_SUCCESS) {
-		DEBUGOUT("EEPROM write buffer - semaphore failed\n");
-		goto out;
-	}
-
-	for (i = 0; i < words; i++) {
-		status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
-							 data[i]);
-
-		if (status != IXGBE_SUCCESS) {
-			DEBUGOUT("Eeprom buffered write failed\n");
-			break;
-		}
-	}
-
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-out:
-
-	return status;
-}
-
-/**
- * ixgbe_checksum_ptr_x550 - Checksum one pointer region
- * @hw: pointer to hardware structure
- * @ptr: pointer offset in eeprom
- * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
- * @csum: address of checksum to update
- *
- * Returns error status for any failure
- */
-STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
-				   u16 size, u16 *csum, u16 *buffer,
-				   u32 buffer_size)
-{
-	u16 buf[256];
-	s32 status;
-	u16 length, bufsz, i, start;
-	u16 *local_buffer;
-
-	bufsz = sizeof(buf) / sizeof(buf[0]);
-
-	/* Read a chunk at the pointer location */
-	if (!buffer) {
-		status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
-		if (status) {
-			DEBUGOUT("Failed to read EEPROM image\n");
-			return status;
-		}
-		local_buffer = buf;
-	} else {
-		if (buffer_size < ptr)
-			return  IXGBE_ERR_PARAM;
-		local_buffer = &buffer[ptr];
-	}
-
-	if (size) {
-		start = 0;
-		length = size;
-	} else {
-		start = 1;
-		length = local_buffer[0];
-
-		/* Skip pointer section if length is invalid. */
-		if (length == 0xFFFF || length == 0 ||
-		    (ptr + length) >= hw->eeprom.word_size)
-			return IXGBE_SUCCESS;
-	}
-
-	if (buffer && ((u32)start + (u32)length > buffer_size))
-		return IXGBE_ERR_PARAM;
-
-	for (i = start; length; i++, length--) {
-		if (i == bufsz && !buffer) {
-			ptr += bufsz;
-			i = 0;
-			if (length < bufsz)
-				bufsz = length;
-
-			/* Read a chunk at the pointer location */
-			status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
-								  bufsz, buf);
-			if (status) {
-				DEBUGOUT("Failed to read EEPROM image\n");
-				return status;
-			}
-		}
-		*csum += local_buffer[i];
-	}
-	return IXGBE_SUCCESS;
-}
-
-/**
- *  ixgbe_calc_checksum_X550 - Calculates and returns the checksum
- *  @hw: pointer to hardware structure
- *  @buffer: pointer to a buffer holding the EEPROM image, or NULL to read it from hardware
- *  @buffer_size: size of buffer
- *
- *  Returns a negative error code on error, or the 16-bit checksum
- **/
-s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
-{
-	u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
-	u16 *local_buffer;
-	s32 status;
-	u16 checksum = 0;
-	u16 pointer, i, size;
-
-	DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
-
-	hw->eeprom.ops.init_params(hw);
-
-	if (!buffer) {
-		/* Read pointer area */
-		status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
-						     IXGBE_EEPROM_LAST_WORD + 1,
-						     eeprom_ptrs);
-		if (status) {
-			DEBUGOUT("Failed to read EEPROM image\n");
-			return status;
-		}
-		local_buffer = eeprom_ptrs;
-	} else {
-		if (buffer_size < IXGBE_EEPROM_LAST_WORD)
-			return IXGBE_ERR_PARAM;
-		local_buffer = buffer;
-	}
-
-	/*
-	 * For X550 hardware include 0x0-0x41 in the checksum, skip the
-	 * checksum word itself
-	 */
-	for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
-		if (i != IXGBE_EEPROM_CHECKSUM)
-			checksum += local_buffer[i];
-
-	/*
-	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
-	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
-	 */
-	for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
-		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
-			continue;
-
-		pointer = local_buffer[i];
-
-		/* Skip pointer section if the pointer is invalid. */
-		if (pointer == 0xFFFF || pointer == 0 ||
-		    pointer >= hw->eeprom.word_size)
-			continue;
-
-		switch (i) {
-		case IXGBE_PCIE_GENERAL_PTR:
-			size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
-			break;
-		case IXGBE_PCIE_CONFIG0_PTR:
-		case IXGBE_PCIE_CONFIG1_PTR:
-			size = IXGBE_PCIE_CONFIG_SIZE;
-			break;
-		default:
-			size = 0;
-			break;
-		}
-
-		status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
-						buffer, buffer_size);
-		if (status)
-			return status;
-	}
-
-	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
-
-	return (s32)checksum;
-}
-
-/**
- *  ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
- *  @hw: pointer to hardware structure
- *
- *  Returns a negative error code on error, or the 16-bit checksum
- **/
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
-{
-	return ixgbe_calc_checksum_X550(hw, NULL, 0);
-}
-
-/**
- *  ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
- *  @hw: pointer to hardware structure
- *  @checksum_val: calculated checksum
- *
- *  Performs checksum calculation and validates the EEPROM checksum.  If the
- *  caller does not need checksum_val, the value can be NULL.
- **/
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
-{
-	s32 status;
-	u16 checksum;
-	u16 read_checksum = 0;
-
-	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = hw->eeprom.ops.read(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	status = hw->eeprom.ops.calc_checksum(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
-					   &read_checksum);
-	if (status)
-		return status;
-
-	/* Verify read checksum from EEPROM is the same as
-	 * calculated checksum
-	 */
-	if (read_checksum != checksum) {
-		status = IXGBE_ERR_EEPROM_CHECKSUM;
-		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
-			     "Invalid EEPROM checksum");
-	}
-
-	/* If the user cares, return the calculated checksum */
-	if (checksum_val)
-		*checksum_val = checksum;
-
-	return status;
-}
-
-/**
- * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
- * @hw: pointer to hardware structure
- *
- * After writing EEPROM to shadow RAM using EEWR register, software calculates
- * checksum and updates the EEPROM and instructs the hardware to update
- * the flash.
- **/
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
-{
-	s32 status;
-	u16 checksum = 0;
-
-	DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
-
-	/* Read the first word from the EEPROM. If this times out or fails, do
-	 * not continue or we could be in for a very long wait while every
-	 * EEPROM read fails
-	 */
-	status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
-	if (status) {
-		DEBUGOUT("EEPROM read failed\n");
-		return status;
-	}
-
-	status = ixgbe_calc_eeprom_checksum_X550(hw);
-	if (status < 0)
-		return status;
-
-	checksum = (u16)(status & 0xffff);
-
-	status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
-					    checksum);
-	if (status)
-		return status;
-
-	status = ixgbe_update_flash_X550(hw);
-
-	return status;
-}
-
-/**
- *  ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
- *  @hw: pointer to hardware structure
- *
- *  Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
- **/
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
-{
-	s32 status = IXGBE_SUCCESS;
-	union ixgbe_hic_hdr2 buffer;
-
-	DEBUGFUNC("ixgbe_update_flash_X550");
-
-	buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
-	buffer.req.buf_lenh = 0;
-	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
-	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
-
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
-					      IXGBE_HI_COMMAND_TIMEOUT, false);
-
-	return status;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
-{
-	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-	u16 ext_ability = 0;
-
-	DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
-
-	hw->phy.ops.identify(hw);
-
-	switch (hw->phy.type) {
-	case ixgbe_phy_x550em_kr:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
-				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		break;
-	case ixgbe_phy_x550em_kx4:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
-				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-		break;
-	case ixgbe_phy_x550em_ext_t:
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
-				     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				     &ext_ability);
-		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
-			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-		break;
-	default:
-		break;
-	}
-
-	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
-		physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
-
-	return physical_layer;
-}
-
-/**
- * ixgbe_get_bus_info_x550em - Set PCI bus info
- * @hw: pointer to hardware structure
- *
- * Sets bus link width and speed to unknown because X550em is
- * not a PCI device.
- **/
-s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
-{
-
-	DEBUGFUNC("ixgbe_get_bus_info_x550em");
-
-	hw->bus.width = ixgbe_bus_width_unknown;
-	hw->bus.speed = ixgbe_bus_speed_unknown;
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_disable_rx_x550 - Disable RX unit
- * @hw: pointer to hardware structure
- *
- * Disables the Rx DMA unit for x550
- **/
-void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
-{
-	u32 rxctrl, pfdtxgswc;
-	s32 status;
-	struct ixgbe_hic_disable_rxen fw_cmd;
-
-	DEBUGFUNC("ixgbe_enable_rx_dma_x550");
-
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	if (rxctrl & IXGBE_RXCTRL_RXEN) {
-		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
-			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
-			hw->mac.set_lben = true;
-		} else {
-			hw->mac.set_lben = false;
-		}
-
-		fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
-		fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
-		fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
-		fw_cmd.port_number = (u8)hw->bus.lan_id;
-
-		status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
-					sizeof(struct ixgbe_hic_disable_rxen),
-					IXGBE_HI_COMMAND_TIMEOUT, true);
-
-		/* If we fail - disable RX using register write */
-		if (status) {
-			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-			if (rxctrl & IXGBE_RXCTRL_RXEN) {
-				rxctrl &= ~IXGBE_RXCTRL_RXEN;
-				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-			}
-		}
-	}
-}
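For context on how the hostif EEPROM helpers above fit together: callers are expected to use the locked ixgbe_read/write_ee_hostif_X550() wrappers, which take the IXGBE_GSSR_EEP_SM semaphore around the *_data_X550() variants, and to refresh the checksum afterwards. The sketch below is a hypothetical caller (not part of this patch) and assumes only the signatures declared in ixgbe_x550.h:

/* Illustrative only: read-modify-write one shadow RAM word, then have FW
 * re-checksum and copy shadow RAM to flash. */
static s32 example_patch_eeprom_word(struct ixgbe_hw *hw, u16 offset,
				     u16 set_bits)
{
	u16 word;
	s32 status;

	status = ixgbe_read_ee_hostif_X550(hw, offset, &word);
	if (status != IXGBE_SUCCESS)
		return status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, word | set_bits);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Recalculates the checksum word and issues the shadow RAM dump
	 * command (ixgbe_update_flash_X550) internally. */
	return ixgbe_update_eeprom_checksum_X550(hw);
}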
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
deleted file mode 100644
index 1aff9b3..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x550.h
+++ /dev/null
@@ -1,91 +0,0 @@ 
-/*******************************************************************************
-
-Copyright (c) 2001-2014, Intel Corporation
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-
-#ifndef _IXGBE_X550_H_
-#define _IXGBE_X550_H_
-
-#include "ixgbe_type.h"
-
-s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
-s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
-s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
-
-s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
-s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
-s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw);
-s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size);
-s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val);
-s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw);
-s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				      u16 offset, u16 words, u16 *data);
-s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
-			       u16 data);
-s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
-				     u16 offset, u16 words, u16 *data);
-s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
-u16				*data);
-s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-				   u16 *data);
-s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-				    u16 data);
-s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
-void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
-					   unsigned int pool);
-void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
-					    bool enable, int vf);
-s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-				 u32 device_type, u32 data);
-s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-				u32 device_type, u32 *data);
-void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
-void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
-void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
-void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf);
-enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
-				       ixgbe_link_speed *speed, bool *autoneg);
-void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
-void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
-#endif /* _IXGBE_X550_H_ */
-
diff --git a/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c b/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
deleted file mode 100644
index 12cc01d..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_82599_bypass.c
+++ /dev/null
@@ -1,314 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ixgbe/ixgbe_type.h"
-#include "ixgbe/ixgbe_82599.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe/ixgbe_phy.h"
-#include "ixgbe_bypass_defines.h"
-#include "ixgbe_bypass.h"
-
-/**
- *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- *  @hw: pointer to hardware structure
- *  @speed: link speed to set
- *
- *  We set the module speed differently for fixed fiber.  Unlike other
- *  multi-speed devices we don't have an error value to return, so if we
- *  detect an error we just log it and exit.
- */
-static void
-ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
-{
-	s32 status;
-	u8 rs, eeprom_data;
-
-	switch (speed) {
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		/* one bit mask same as setting on */
-		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
-		break;
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "Invalid fixed module speed");
-		return;
-	}
-
-	/* Set RS0 */
-	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-					   IXGBE_I2C_EEPROM_DEV_ADDR2,
-					   &eeprom_data);
-	if (status) {
-		PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS0");
-		goto out;
-	}
-
-	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
-	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-					    IXGBE_I2C_EEPROM_DEV_ADDR2,
-					    eeprom_data);
-	if (status) {
-		PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS0");
-		goto out;
-	}
-
-	/* Set RS1 */
-	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-					   IXGBE_I2C_EEPROM_DEV_ADDR2,
-					   &eeprom_data);
-	if (status) {
-		PMD_DRV_LOG(ERR, "Failed to read Rx Rate Select RS1");
-		goto out;
-	}
-
-	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
-	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-					    IXGBE_I2C_EEPROM_DEV_ADDR2,
-					    eeprom_data);
-	if (status) {
-		PMD_DRV_LOG(ERR, "Failed to write Rx Rate Select RS1");
-		goto out;
-	}
-out:
-	return;
-}
-
-/**
- *  ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
- *  @hw: pointer to hardware structure
- *  @speed: new link speed
- *  @autoneg_wait_to_complete: true when waiting for completion is needed
- *
- *  Set the link speed in the AUTOC register and restarts link.
- **/
-static s32
-ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
-				     ixgbe_link_speed speed,
-				     bool autoneg_wait_to_complete)
-{
-	s32 status = IXGBE_SUCCESS;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	u32 speedcnt = 0;
-	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-	u32 i = 0;
-	bool link_up = false;
-	bool negotiation;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Mask off requested but non-supported speeds */
-	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
-	if (status != IXGBE_SUCCESS)
-		return status;
-
-	speed &= link_speed;
-
-	/*
-	 * Try each speed one by one, highest priority first.  We do this in
-	 * software because 10gb fiber doesn't support speed autonegotiation.
-	 */
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
-		speedcnt++;
-		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
-			goto out;
-		/* Set the module link speed */
-		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);
-
-		/* Set the module link speed */
-		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-		IXGBE_WRITE_FLUSH(hw);
-
-		/* Allow module to change analog characteristics (1G->10G) */
-		msec_delay(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_10GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		/* Flap the tx laser if it has not already been done */
-		ixgbe_flap_tx_laser(hw);
-
-		/*
-		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
-		 * Section 73.10.2, we may have to wait up to 500ms if KR is
-		 * attempted.  82599 uses the same timing for 10g SFI.
-		 */
-		for (i = 0; i < 5; i++) {
-			/* Wait for the link partner to also set speed */
-			msec_delay(100);
-
-			/* If we have link, just jump out */
-			status = ixgbe_check_link(hw, &link_speed,
-						  &link_up, false);
-			if (status != IXGBE_SUCCESS)
-				return status;
-
-			if (link_up)
-				goto out;
-		}
-	}
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
-		speedcnt++;
-		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
-			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
-		/* If we already have link at this speed, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
-			goto out;
-
-		/* Set the module link speed */
-		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);
-
-		/* Allow module to change analog characteristics (10G->1G) */
-		msec_delay(40);
-
-		status = ixgbe_setup_mac_link_82599(hw,
-						    IXGBE_LINK_SPEED_1GB_FULL,
-						    autoneg_wait_to_complete);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		/* Flap the tx laser if it has not already been done */
-		ixgbe_flap_tx_laser(hw);
-
-		/* Wait for the link partner to also set speed */
-		msec_delay(100);
-
-		/* If we have link, just jump out */
-		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
-		if (status != IXGBE_SUCCESS)
-			return status;
-
-		if (link_up)
-			goto out;
-	}
-
-	/*
-	 * We didn't get link.  Configure back to the highest speed we tried,
-	 * (if there was more than one).  We call ourselves back with just the
-	 * single highest speed that the user requested.
-	 */
-	if (speedcnt > 1)
-		status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
-			highest_link_speed, autoneg_wait_to_complete);
-
-out:
-	/* Set autoneg_advertised value based on input link speed */
-	hw->phy.autoneg_advertised = 0;
-
-	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
-		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
-	return status;
-}
-
-static enum ixgbe_media_type
-ixgbe_bypass_get_media_type(struct ixgbe_hw *hw)
-{
-	enum ixgbe_media_type media_type;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-		media_type = ixgbe_media_type_fiber;
-	} else {
-		media_type = ixgbe_get_media_type_82599(hw);
-	}
-	return (media_type);
-}
-
-/*
- * Wrapper around shared code (base driver) to support BYPASS nic.
- */
-s32
-ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw)
-{
-	s32 ret_val;
-
-	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-		hw->mac.type = ixgbe_mac_82599EB;
-	}
-
-	ret_val = ixgbe_init_shared_code(hw);
-	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-		hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
-		ixgbe_init_mac_link_ops_82599(hw);
-	}
-
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
-{
-	int rc;
-
-	if ((rc  = ixgbe_init_hw(hw)) == 0 &&
-			hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-
-		hw->mac.ops.setup_link =
-			&ixgbe_setup_mac_link_multispeed_fixed_fiber;
-
-		hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
-
-		hw->mac.ops.disable_tx_laser = NULL;
-		hw->mac.ops.enable_tx_laser = NULL;
-		hw->mac.ops.flap_tx_laser = NULL;
-	}
-
-	return (rc);
-}
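The two bypass wrappers above stand in for the plain shared-code entry points on the 82599 bypass NIC. A hypothetical probe sequence (not part of this patch) would call them in this order:

/* Illustrative only: on the bypass device id the wrappers install the
 * fixed-fiber link ops and clear the laser callbacks. */
static s32 example_bypass_port_init(struct ixgbe_hw *hw)
{
	s32 status;

	/* instead of ixgbe_init_shared_code() */
	status = ixgbe_bypass_init_shared_code(hw);
	if (status != IXGBE_SUCCESS)
		return status;

	/* instead of ixgbe_init_hw() */
	return ixgbe_bypass_init_hw(hw);
}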
diff --git a/lib/librte_pmd_ixgbe/ixgbe_bypass.c b/lib/librte_pmd_ixgbe/ixgbe_bypass.c
deleted file mode 100644
index 832f415..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass.c
+++ /dev/null
@@ -1,414 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <time.h>
-#include <rte_atomic.h>
-#include <rte_ethdev.h>
-#include "ixgbe_ethdev.h"
-#include "ixgbe_bypass_api.h"
-
-#define	BYPASS_STATUS_OFF_MASK	3
-
-/* Macros to check for invalid function pointers. */
-#define	FUNC_PTR_OR_ERR_RET(func, retval) do {              \
-	if ((func) == NULL) {                               \
-		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
-			    __func__, __LINE__);            \
-		return retval;                            \
-	}                                                   \
-} while(0)
-
-#define	FUNC_PTR_OR_RET(func) do {                          \
-	if ((func) == NULL) {                               \
-		PMD_DRV_LOG(ERR, "%s:%d function not supported", \
-			    __func__, __LINE__);            \
-		return;                                     \
-	}                                                   \
-} while(0)
-
-
-/**
- *  ixgbe_bypass_set_time - Set bypass FW time epoch.
- *
- *  @adapter: pointer to the ixgbe_adapter structure
- *
- *  This function syncs the FW date stamp with that of the
- *  system clock.
- **/
-static void
-ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
-{
-	u32 mask, value;
-	u32 sec;
-	struct ixgbe_hw *hw = &adapter->hw;
-
-	sec = 0;
-
-	/*
-	 * Send the FW our current time and turn on time_valid and
-	 * timer_reset bits.
-	 */
-	mask = BYPASS_CTL1_TIME_M |
-	       BYPASS_CTL1_VALID_M |
-	       BYPASS_CTL1_OFFTRST_M;
-	value = (sec & BYPASS_CTL1_TIME_M) |
-	        BYPASS_CTL1_VALID |
-		BYPASS_CTL1_OFFTRST;
-
-	FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
-
-	/* Store FW reset time (in seconds from epoch). */
-	adapter->bps.reset_tm = time(NULL);
-
-	/* reset FW timer. */
-	adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
-}
-
-/**
- * ixgbe_bypass_init - Make some environment changes for bypass
- *
- * @dev: pointer to the rte_eth_dev structure used to access the adapter state
- *
- * This function collects all the modifications needed by the bypass
- * driver.
- **/
-void
-ixgbe_bypass_init(struct rte_eth_dev *dev)
-{
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
-
-	adapter = IXGBE_DEV_TO_ADPATER(dev);
-	hw = &adapter->hw;
-
-	/* Only allow BYPASS ops on the first port */
-	if (hw->device_id != IXGBE_DEV_ID_82599_BYPASS ||
-			hw->bus.func != 0) {
-		PMD_DRV_LOG(ERR, "bypass function is not supported on that device");
-		return;
-	}
-
-	/* set bypass ops. */
-	adapter->bps.ops.bypass_rw = &ixgbe_bypass_rw_generic;
-	adapter->bps.ops.bypass_valid_rd = &ixgbe_bypass_valid_rd_generic;
-	adapter->bps.ops.bypass_set = &ixgbe_bypass_set_generic;
-	adapter->bps.ops.bypass_rd_eep = &ixgbe_bypass_rd_eep_generic;
-
-	/* set the time for logging. */
-	ixgbe_bypass_set_time(adapter);
-
-	/* Don't have the SDP to the laser */
-	hw->mac.ops.disable_tx_laser = NULL;
-	hw->mac.ops.enable_tx_laser = NULL;
-	hw->mac.ops.flap_tx_laser = NULL;
-}
-
-s32
-ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state)
-{
-	struct ixgbe_hw *hw;
-	s32 ret_val;
-	u32 cmd;
-	u32 by_ctl = 0;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
-	cmd = BYPASS_PAGE_CTL0;
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
-	/* Assume bypass_rw didn't error out; if it did, state will
-	 * be ignored anyway.
-	 */
-	*state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) &  BYPASS_STATUS_OFF_MASK;
-
-	return (ret_val);
-}
-
-
-s32
-ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state)
-{
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-	struct ixgbe_hw *hw;
-	s32 ret_val;
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
-	/* Set the new state */
-	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
-					 BYPASS_MODE_OFF_M, *new_state);
-	if (ret_val)
-		goto exit;
-
-	/* Set AUTO back on so FW can receive events */
-	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
-					 BYPASS_MODE_OFF_M, BYPASS_AUTO);
-
-exit:
-	return ret_val;
-
-}
-
-s32
-ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event,
-			    u32 *state)
-{
-	struct ixgbe_hw *hw;
-	s32 ret_val;
-	u32 shift;
-	u32 cmd;
-	u32 by_ctl = 0;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
-	cmd = BYPASS_PAGE_CTL0;
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
-	/* Assume bypass_rw didn't error out; if it did, event will
-	 * be ignored anyway.
-	 */
-	switch (event) {
-	case BYPASS_EVENT_WDT_TO:
-		shift = BYPASS_WDTIMEOUT_SHIFT;
-		break;
-	case BYPASS_EVENT_MAIN_ON:
-		shift = BYPASS_MAIN_ON_SHIFT;
-		break;
-	case BYPASS_EVENT_MAIN_OFF:
-		shift = BYPASS_MAIN_OFF_SHIFT;
-		break;
-	case BYPASS_EVENT_AUX_ON:
-		shift = BYPASS_AUX_ON_SHIFT;
-		break;
-	case BYPASS_EVENT_AUX_OFF:
-		shift = BYPASS_AUX_OFF_SHIFT;
-		break;
-	default:
-		return EINVAL;
-	}
-
-	*state = (by_ctl >> shift) & 0x3;
-
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event,
-			     u32 state)
-{
-	struct ixgbe_hw *hw;
-	u32 status;
-	u32 off;
-	s32 ret_val;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
-	switch (event) {
-	case BYPASS_EVENT_WDT_TO:
-		off = BYPASS_WDTIMEOUT_M;
-		status = state << BYPASS_WDTIMEOUT_SHIFT;
-		break;
-	case BYPASS_EVENT_MAIN_ON:
-		off = BYPASS_MAIN_ON_M;
-		status = state << BYPASS_MAIN_ON_SHIFT;
-		break;
-	case BYPASS_EVENT_MAIN_OFF:
-		off = BYPASS_MAIN_OFF_M;
-		status = state << BYPASS_MAIN_OFF_SHIFT;
-		break;
-	case BYPASS_EVENT_AUX_ON:
-		off = BYPASS_AUX_ON_M;
-		status = state << BYPASS_AUX_ON_SHIFT;
-		break;
-	case BYPASS_EVENT_AUX_OFF:
-		off = BYPASS_AUX_OFF_M;
-		status = state << BYPASS_AUX_OFF_SHIFT;
-		break;
-	default:
-		return EINVAL;
-	}
-
-	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
-		off, status);
-
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
-{
-	struct ixgbe_hw *hw;
-	u32 status;
-	u32 mask;
-	s32 ret_val;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_set, -ENOTSUP);
-
-	/* disable the timer with timeout of zero */
-	if (timeout == RTE_BYPASS_TMT_OFF) {
-		status = 0x0;   /* WDG enable off */
-		mask = BYPASS_WDT_ENABLE_M;
-	} else {
-		/* set time out value */
-		mask = BYPASS_WDT_VALUE_M;
-
-		/* enable the timer */
-		status = timeout << BYPASS_WDT_TIME_SHIFT;
-		status |= 0x1 << BYPASS_WDT_ENABLE_SHIFT;
-		mask |= BYPASS_WDT_ENABLE_M;
-	}
-
-	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
-		mask, status);
-
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver)
-{
-	struct ixgbe_hw *hw;
-	u32 cmd;
-	u32 status;
-	s32 ret_val;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
-	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
-	cmd |= (BYPASS_EEPROM_VER_ADD << BYPASS_CTL2_OFFSET_SHIFT) &
-	       BYPASS_CTL2_OFFSET_M;
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
-	if (ret_val)
-		goto exit;
-
-	/* wait for the write to stick */
-	msleep(100);
-
-	/* Now read the results */
-	cmd &= ~BYPASS_WE;
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
-	if (ret_val)
-		goto exit;
-
-	*ver = status & BYPASS_CTL2_DATA_M;      /* only one byte of data */
-
-exit:
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout)
-{
-	struct ixgbe_hw *hw;
-	u32 by_ctl = 0;
-	u32 cmd;
-	u32 wdg;
-	s32 ret_val;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-
-	cmd = BYPASS_PAGE_CTL0;
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &by_ctl);
-
-	wdg = by_ctl & BYPASS_WDT_ENABLE_M;
-	if (!wdg)
-		*wd_timeout = RTE_BYPASS_TMT_OFF;
-	else
-		*wd_timeout = (by_ctl >> BYPASS_WDT_TIME_SHIFT) &
-			BYPASS_WDT_MASK;
-
-	return ret_val;
-}
-
-s32
-ixgbe_bypass_wd_reset(struct rte_eth_dev *dev)
-{
-	u32 cmd;
-	u32 status;
-	u32 sec;
-	u32 count = 0;
-	s32 ret_val;
-	struct ixgbe_hw *hw;
-	struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
-
-	hw = &adapter->hw;
-
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_rw, -ENOTSUP);
-	FUNC_PTR_OR_ERR_RET(adapter->bps.ops.bypass_valid_rd, -ENOTSUP);
-
-	/* Use the lower level bit-bang functions since we don't need
-	 * to read the register first to get its current state as we
-	 * are setting everything in this write.
-	 */
-	/* Set up WD pet */
-	cmd = BYPASS_PAGE_CTL1 | BYPASS_WE | BYPASS_CTL1_WDT_PET;
-
-	/* Resync the FW time while writing to CTL1 anyway */
-	adapter->bps.reset_tm = time(NULL);
-	sec = 0;
-
-	cmd |= (sec & BYPASS_CTL1_TIME_M) | BYPASS_CTL1_VALID;
-
-	/* reset FW timer offset since we are resetting the clock */
-	cmd |= BYPASS_CTL1_OFFTRST;
-
-	ret_val = adapter->bps.ops.bypass_rw(hw, cmd, &status);
-
-	/* Read until it matches what we wrote, or we time out */
-	do {
-		if (count++ > 10) {
-			ret_val = IXGBE_BYPASS_FW_WRITE_FAILURE;
-			break;
-		}
-
-		if (adapter->bps.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &status)) {
-			ret_val = IXGBE_ERR_INVALID_ARGUMENT;
-			break;
-		}
-	} while (!adapter->bps.ops.bypass_valid_rd(cmd, status));
-
-	return ret_val;
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_bypass.h b/lib/librte_pmd_ixgbe/ixgbe_bypass.h
deleted file mode 100644
index fcd9774..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass.h
+++ /dev/null
@@ -1,68 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_BYPASS_H_
-#define _IXGBE_BYPASS_H_
-
-#ifdef RTE_NIC_BYPASS
-
-struct ixgbe_bypass_mac_ops {
-	s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
-	bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
-	s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
-	s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
-};
-
-struct ixgbe_bypass_info {
-	uint64_t reset_tm;
-	struct ixgbe_bypass_mac_ops ops;
-};
-
-struct rte_eth_dev;
-
-void ixgbe_bypass_init(struct rte_eth_dev *dev);
-s32 ixgbe_bypass_state_show(struct rte_eth_dev *dev, u32 *state);
-s32 ixgbe_bypass_state_store(struct rte_eth_dev *dev, u32 *new_state);
-s32 ixgbe_bypass_event_show(struct rte_eth_dev *dev, u32 event, u32 *state);
-s32 ixgbe_bypass_event_store(struct rte_eth_dev *dev, u32 event, u32 state);
-s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout);
-s32 ixgbe_bypass_ver_show(struct rte_eth_dev *dev, u32 *ver);
-s32 ixgbe_bypass_wd_timeout_show(struct rte_eth_dev *dev, u32 *wd_timeout);
-s32 ixgbe_bypass_wd_reset(struct rte_eth_dev *dev);
-
-s32 ixgbe_bypass_init_shared_code(struct ixgbe_hw *hw);
-s32 ixgbe_bypass_init_hw(struct ixgbe_hw *hw);
-
-#endif /* RTE_NIC_BYPASS */
-
-#endif /*  _IXGBE_BYPASS_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_bypass_api.h b/lib/librte_pmd_ixgbe/ixgbe_bypass_api.h
deleted file mode 100644
index b4a7386..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass_api.h
+++ /dev/null
@@ -1,299 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_BYPASS_API_H_
-#define _IXGBE_BYPASS_API_H_
-
-#ifdef RTE_NIC_BYPASS
-
-#include "ixgbe_bypass_defines.h"
-/**
- *  ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
- *
- *  @hw: pointer to hardware structure
- *  @cmd: Command we send to the FW
- *  @status: The reply from the FW
- *
- *  Bit-bangs the cmd to the by_pass FW; status points to what is returned.
- **/
-#define IXGBE_BYPASS_BB_WAIT 1
-static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
-{
-	int i;
-	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
-	u32 esdp;
-
-	if (!status)
-		return IXGBE_ERR_PARAM;
-
-	*status = 0;
-
-	/* SDP vary by MAC type */
-	switch (hw->mac.type) {
-	case ixgbe_mac_82599EB:
-		sck = IXGBE_ESDP_SDP7;
-		sdi = IXGBE_ESDP_SDP0;
-		sdo = IXGBE_ESDP_SDP6;
-		dir_sck = IXGBE_ESDP_SDP7_DIR;
-		dir_sdi = IXGBE_ESDP_SDP0_DIR;
-		dir_sdo = IXGBE_ESDP_SDP6_DIR;
-		break;
-	case ixgbe_mac_X540:
-		sck = IXGBE_ESDP_SDP2;
-		sdi = IXGBE_ESDP_SDP0;
-		sdo = IXGBE_ESDP_SDP1;
-		dir_sck = IXGBE_ESDP_SDP2_DIR;
-		dir_sdi = IXGBE_ESDP_SDP0_DIR;
-		dir_sdo = IXGBE_ESDP_SDP1_DIR;
-		break;
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		sck = IXGBE_ESDP_SDP2;
-		sdi = IXGBE_ESDP_SDP0;
-		sdo = IXGBE_ESDP_SDP1;
-		dir_sck = IXGBE_ESDP_SDP2_DIR;
-		dir_sdi = IXGBE_ESDP_SDP0_DIR;
-		dir_sdo = IXGBE_ESDP_SDP1_DIR;
-		break;
-	default:
-		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-	}
-
-	/* Set SDP pins direction */
-	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-	esdp |= dir_sck;	/* SCK as output */
-	esdp |= dir_sdi;	/* SDI as output */
-	esdp &= ~dir_sdo;	/* SDO as input */
-	esdp |= sck;
-	esdp |= sdi;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-  //  TODO:
-	msleep(IXGBE_BYPASS_BB_WAIT);
-
-	/* Generate start condition */
-	esdp &= ~sdi;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-	msleep(IXGBE_BYPASS_BB_WAIT);
-
-	esdp &= ~sck;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-	msleep(IXGBE_BYPASS_BB_WAIT);
-
-	/* Clock out the new control word and clock in the status */
-	for (i = 0; i < 32; i++) {
-		if ((cmd >> (31 - i)) & 0x01) {
-			esdp |= sdi;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		} else {
-			esdp &= ~sdi;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		}
-		IXGBE_WRITE_FLUSH(hw);
-		msleep(IXGBE_BYPASS_BB_WAIT);
-
-		esdp |= sck;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-		msleep(IXGBE_BYPASS_BB_WAIT);
-
-		esdp &= ~sck;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-		IXGBE_WRITE_FLUSH(hw);
-		msleep(IXGBE_BYPASS_BB_WAIT);
-
-		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-		if (esdp & sdo)
-			*status = (*status << 1) | 0x01;
-		else
-			*status = (*status << 1) | 0x00;
-		msleep(IXGBE_BYPASS_BB_WAIT);
-	}
-
-	/* stop condition */
-	esdp |= sck;
-	esdp &= ~sdi;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-	msleep(IXGBE_BYPASS_BB_WAIT);
-
-	esdp |= sdi;
-	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* set the page bits to match the cmd the status belongs to */
-	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);
-
-	return 0;
-}
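/*
 * Illustrative sketch, not part of the patch: the loop above clocks the
 * 32-bit cmd out MSB-first on SDI and assembles the 32-bit reply one SDO
 * sample per clock.  Stripped of the ESDP register accesses, the shifting
 * logic looks like this; sample_sdo is a hypothetical callback standing in
 * for reading the SDO pin.
 */
static u32 bypass_bitbang_word_sketch(u32 cmd, int (*sample_sdo)(void))
{
	u32 status = 0;
	int i;

	for (i = 0; i < 32; i++) {
		int out_bit = (cmd >> (31 - i)) & 0x01;	/* drive SDI with this bit */
		(void)out_bit;				/* SCK high/low pulse would go here */
		status = (status << 1) | (sample_sdo() ? 0x01 : 0x00);
	}
	return status;
}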
-
-/**
- * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
- *
- * If we send a write we can't be sure it took until we can read back
- * that same register.  It can be a problem as some of the fields may
- * for valid reasons change between the time we wrote the register and
- * the time we read it again to verify.  So this function checks
- * everything it can check and then assumes it worked.
- *
- * @u32 in_reg - The register cmd for the bit-bang read.
- * @u32 out_reg - The register returned from a bit-bang read.
- **/
-static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
-{
-	u32 mask;
-
-	/* Page must match for all control pages */
-	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
-		return false;
-
-	switch (in_reg & BYPASS_PAGE_M) {
-	case BYPASS_PAGE_CTL0:
-		/* All the following can't change since the last write
-		 *  - All the event actions
-		 *  - The timeout value
-		 */
-		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
-		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
-		       BYPASS_WDTIMEOUT_M |
-		       BYPASS_WDT_VALUE_M;
-		if ((out_reg & mask) != (in_reg & mask))
-			return false;
-
-		/* 0x0 is never a valid value for bypass status */
-		if (!(out_reg & BYPASS_STATUS_OFF_M))
-			return false;
-		break;
-	case BYPASS_PAGE_CTL1:
-		/* All the following can't change since the last write
-		 *  - time valid bit
-		 *  - time we last sent
-		 */
-		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
-		if ((out_reg & mask) != (in_reg & mask))
-			return false;
-		break;
-	case BYPASS_PAGE_CTL2:
-		/* All we can check in this page is control number
-		 * which is already done above.
-		 */
-		break;
-	}
-
-	/* We are as sure as we can be, so return true */
-	return true;
-}
-
-/**
- *  ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
- *
- *  @hw: pointer to hardware structure
- *  @ctrl: The control word we are setting.
- *  @event: The event we are setting in the FW.  This also happens to
- *	    be the mask for the event we are setting (handy)
- *  @action: The action we set the event to in the FW. This is in a
- *	     bit field that happens to be what we want to put in
- *	     the event spot (also handy)
- **/
-static s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
-			     u32 action)
-{
-	u32 by_ctl = 0;
-	u32 cmd, verify;
-	u32 count = 0;
-
-	/* Get current values */
-	cmd = ctrl;	/* just reading only need control number */
-	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
-		return IXGBE_ERR_INVALID_ARGUMENT;
-
-	/* Set to new action */
-	cmd = (by_ctl & ~event) | BYPASS_WE | action;
-	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
-		return IXGBE_ERR_INVALID_ARGUMENT;
-
-	/* Page 0 forces a FW eeprom write, which is slow, so verify */
-	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
-		verify = BYPASS_PAGE_CTL0;
-		do {
-			if (count++ > 5)
-				return IXGBE_BYPASS_FW_WRITE_FAILURE;
-
-			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
-				return IXGBE_ERR_INVALID_ARGUMENT;
-		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
-	} else {
-		/* We have to give the FW time for the write to stick */
-		msleep(100);
-	}
-
-	return 0;
-}
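/*
 * Hedged usage sketch, not part of the patch: a caller programs one
 * event/action pair on control page 0, e.g. "on main power off, switch to
 * bypass mode".  The event constant doubles as the field mask and the
 * action is already shifted into that field, which is why set_generic can
 * simply do (by_ctl & ~event) | BYPASS_WE | action.  The helper name is
 * hypothetical.
 */
static s32 bypass_set_main_off_sketch(struct ixgbe_hw *hw)
{
	u32 event = BYPASS_MAIN_OFF_M;				/* field mask */
	u32 action = BYPASS_BYPASS << BYPASS_MAIN_OFF_SHIFT;	/* value in place */

	return ixgbe_bypass_set_generic(hw, BYPASS_PAGE_CTL0, event, action);
}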
-
-/**
- *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
- *
- *  @hw: pointer to hardware structure
- *  @addr: The bypass eeprom address to read.
- *  @value: The 8b of data at the address above.
- **/
-static s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
-{
-	u32 cmd;
-	u32 status;
-
-
-	/* send the request */
-	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
-	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
-	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
-		return IXGBE_ERR_INVALID_ARGUMENT;
-
-	/* We have to give the FW time for the write to stick */
-	msleep(100);
-
-	/* now read the results */
-	cmd &= ~BYPASS_WE;
-	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
-		return IXGBE_ERR_INVALID_ARGUMENT;
-
-	*value = status & BYPASS_CTL2_DATA_M;
-
-	return 0;
-}
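/*
 * Illustrative sketch, not part of the patch: layout of the CTL2 command
 * word used above.  Bits 31:30 select the page, bit 29 (BYPASS_WE) marks
 * the request phase, bits 15:8 carry the eeprom offset and bits 7:0 carry
 * the data byte that comes back once rd_eep_generic clears BYPASS_WE and
 * reads the word again.  The helper name is hypothetical.
 */
static u32 bypass_ctl2_cmd_sketch(u32 addr, bool request_phase)
{
	u32 cmd = BYPASS_PAGE_CTL2;	/* page bits 31:30 = 10b */

	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
	if (request_phase)
		cmd |= BYPASS_WE;
	return cmd;
}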
-
-#endif /* RTE_NIC_BYPASS */
-
-#endif /* _IXGBE_BYPASS_API_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_bypass_defines.h b/lib/librte_pmd_ixgbe/ixgbe_bypass_defines.h
deleted file mode 100644
index 22570ac..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass_defines.h
+++ /dev/null
@@ -1,160 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_BYPASS_DEFINES_H_
-#define _IXGBE_BYPASS_DEFINES_H_
-
-#ifdef RTE_NIC_BYPASS
-
-#define msleep(x)             rte_delay_us(x*1000)
-#define usleep_range(min, max) rte_delay_us(min)
-
-#define BYPASS_PAGE_CTL0	0x00000000
-#define BYPASS_PAGE_CTL1	0x40000000
-#define BYPASS_PAGE_CTL2	0x80000000
-#define BYPASS_PAGE_M		0xc0000000
-#define BYPASS_WE		0x20000000
-
-#define BYPASS_AUTO	0x0
-#define BYPASS_NOP	0x0
-#define BYPASS_NORM	0x1
-#define BYPASS_BYPASS	0x2
-#define BYPASS_ISOLATE	0x3
-
-#define BYPASS_EVENT_MAIN_ON	0x1
-#define BYPASS_EVENT_AUX_ON	0x2
-#define BYPASS_EVENT_MAIN_OFF	0x3
-#define BYPASS_EVENT_AUX_OFF	0x4
-#define BYPASS_EVENT_WDT_TO	0x5
-#define BYPASS_EVENT_USR	0x6
-
-#define BYPASS_MODE_OFF_M	0x00000003
-#define BYPASS_STATUS_OFF_M	0x0000000c
-#define BYPASS_AUX_ON_M		0x00000030
-#define BYPASS_MAIN_ON_M	0x000000c0
-#define BYPASS_MAIN_OFF_M	0x00000300
-#define BYPASS_AUX_OFF_M	0x00000c00
-#define BYPASS_WDTIMEOUT_M	0x00003000
-#define BYPASS_WDT_ENABLE_M	0x00004000
-#define BYPASS_WDT_VALUE_M	0x00070000
-
-#define BYPASS_MODE_OFF_SHIFT	0
-#define BYPASS_STATUS_OFF_SHIFT	2
-#define BYPASS_AUX_ON_SHIFT	4
-#define BYPASS_MAIN_ON_SHIFT	6
-#define BYPASS_MAIN_OFF_SHIFT	8
-#define BYPASS_AUX_OFF_SHIFT	10
-#define BYPASS_WDTIMEOUT_SHIFT	12
-#define BYPASS_WDT_ENABLE_SHIFT	14
-#define BYPASS_WDT_TIME_SHIFT	16
-
-#define BYPASS_WDT_1	0x0
-#define BYPASS_WDT_1_5	0x1
-#define BYPASS_WDT_2	0x2
-#define BYPASS_WDT_3	0x3
-#define BYPASS_WDT_4	0x4
-#define BYPASS_WDT_8	0x5
-#define BYPASS_WDT_16	0x6
-#define BYPASS_WDT_32	0x7
-#define BYPASS_WDT_OFF	0xffff
-
-#define BYPASS_WDT_MASK	0x7
-
-#define BYPASS_CTL1_TIME_M	0x01ffffff
-#define BYPASS_CTL1_VALID_M	0x02000000
-#define BYPASS_CTL1_OFFTRST_M	0x04000000
-#define BYPASS_CTL1_WDT_PET_M	0x08000000
-
-#define BYPASS_CTL1_VALID	0x02000000
-#define BYPASS_CTL1_OFFTRST	0x04000000
-#define BYPASS_CTL1_WDT_PET	0x08000000
-
-#define BYPASS_CTL2_DATA_M	0x000000ff
-#define BYPASS_CTL2_OFFSET_M	0x0000ff00
-#define BYPASS_CTL2_RW_M	0x00010000
-#define BYPASS_CTL2_HEAD_M	0x0ff00000
-
-#define BYPASS_CTL2_OFFSET_SHIFT	8
-#define BYPASS_CTL2_HEAD_SHIFT		20
-
-#define BYPASS_CTL2_RW		0x00010000
-
-enum ixgbe_state_t {
-	__IXGBE_TESTING,
-	__IXGBE_RESETTING,
-	__IXGBE_DOWN,
-	__IXGBE_SERVICE_SCHED,
-	__IXGBE_IN_SFP_INIT,
-	__IXGBE_IN_BYPASS_LOW,
-	__IXGBE_IN_BYPASS_HIGH,
-	__IXGBE_IN_BYPASS_LOG,
-};
-
-#define BYPASS_MAX_LOGS		43
-#define BYPASS_LOG_SIZE		5
-#define BYPASS_LOG_LINE_SIZE	37
-
-#define BYPASS_EEPROM_VER_ADD	0x02
-
-#define BYPASS_LOG_TIME_M	0x01ffffff
-#define BYPASS_LOG_TIME_VALID_M	0x02000000
-#define BYPASS_LOG_HEAD_M	0x04000000
-#define BYPASS_LOG_CLEAR_M	0x08000000
-#define BYPASS_LOG_EVENT_M	0xf0000000
-#define BYPASS_LOG_ACTION_M	0x03
-
-#define BYPASS_LOG_EVENT_SHIFT	28
-#define BYPASS_LOG_CLEAR_SHIFT	24 /* bit offset */
-#define IXGBE_DEV_TO_ADPATER(dev) \
-	((struct ixgbe_adapter*)(dev->data->dev_private))
-
-/* extractions from ixgbe_phy.h */
-#define	IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
-
-#define IXGBE_SFF_SFF_8472_SWAP		0x5C
-#define IXGBE_SFF_SFF_8472_COMP		0x5E
-#define IXGBE_SFF_SFF_8472_OSCB		0x6E
-#define IXGBE_SFF_SFF_8472_ESCB		0x76
-
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G	0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G	0x0
-
-/* extractions from ixgbe_type.h */
-#define IXGBE_DEV_ID_82599_BYPASS	0x155D
-
-#define IXGBE_BYPASS_FW_WRITE_FAILURE	-35
-
-#endif /* RTE_NIC_BYPASS */
-
-#endif /* _IXGBE_BYPASS_DEFINES_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
deleted file mode 100644
index 5f9a1cf..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ /dev/null
@@ -1,4453 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/queue.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <netinet/in.h>
-#include <rte_byteorder.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_eal.h>
-#include <rte_alarm.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_atomic.h>
-#include <rte_malloc.h>
-#include <rte_random.h>
-#include <rte_dev.h>
-
-#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_vf.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe_ethdev.h"
-#include "ixgbe_bypass.h"
-#include "ixgbe_rxtx.h"
-
-/*
- * High threshold controlling when to start sending XOFF frames. Must be at
- * least 8 bytes less than receive packet buffer size. This value is in units
- * of 1024 bytes.
- */
-#define IXGBE_FC_HI    0x80
-
-/*
- * Low threshold controlling when to start sending XON frames. This value is
- * in units of 1024 bytes.
- */
-#define IXGBE_FC_LO    0x40
-
-/* Timer value included in XOFF frames. */
-#define IXGBE_FC_PAUSE 0x680
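/*
 * Hedged worked example, not part of the patch: the two thresholds above
 * are in units of 1024 bytes, so IXGBE_FC_HI (0x80) corresponds to 128 KiB
 * and IXGBE_FC_LO (0x40) to 64 KiB of receive packet buffer occupancy.
 * The enum names are hypothetical.
 */
enum { IXGBE_FC_HI_BYTES_EXAMPLE = 0x80 * 1024,		/* 131072 */
       IXGBE_FC_LO_BYTES_EXAMPLE = 0x40 * 1024 };	/* 65536  */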
-
-#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
-#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
-#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
-
-#define IXGBE_MMW_SIZE_DEFAULT        0x4
-#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
-
-/*
- *  Default values for RX/TX configuration
- */
-#define IXGBE_DEFAULT_RX_FREE_THRESH  32
-#define IXGBE_DEFAULT_RX_PTHRESH      8
-#define IXGBE_DEFAULT_RX_HTHRESH      8
-#define IXGBE_DEFAULT_RX_WTHRESH      0
-
-#define IXGBE_DEFAULT_TX_FREE_THRESH  32
-#define IXGBE_DEFAULT_TX_PTHRESH      32
-#define IXGBE_DEFAULT_TX_HTHRESH      0
-#define IXGBE_DEFAULT_TX_WTHRESH      0
-#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
-
-/* Bit shift and mask */
-#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
-#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
-#define IXGBE_8_BIT_WIDTH  CHAR_BIT
-#define IXGBE_8_BIT_MASK   UINT8_MAX
-
-#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
-
-#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
-static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
-static int  ixgbe_dev_start(struct rte_eth_dev *dev);
-static void ixgbe_dev_stop(struct rte_eth_dev *dev);
-static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
-static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
-static void ixgbe_dev_close(struct rte_eth_dev *dev);
-static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
-				int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
-				struct rte_eth_stats *stats);
-static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
-static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
-					     uint16_t queue_id,
-					     uint8_t stat_idx,
-					     uint8_t is_rx);
-static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-			       struct rte_eth_dev_info *dev_info);
-static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
-				 struct rte_eth_dev_info *dev_info);
-static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-
-static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
-		uint16_t vlan_id, int on);
-static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
-static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
-		uint16_t queue, bool on);
-static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
-		int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
-static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
-static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
-
-static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
-static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
-			       struct rte_eth_fc_conf *fc_conf);
-static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-			       struct rte_eth_fc_conf *fc_conf);
-static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
-		struct rte_eth_pfc_conf *pfc_conf);
-static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-			struct rte_eth_rss_reta_entry64 *reta_conf,
-			uint16_t reta_size);
-static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-			struct rte_eth_rss_reta_entry64 *reta_conf,
-			uint16_t reta_size);
-static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
-static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
-		void *param);
-static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
-static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
-
-/* For Virtual Function support */
-static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
-static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
-static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
-static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
-static void ixgbevf_dev_close(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
-		struct rte_eth_stats *stats);
-static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
-static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
-		uint16_t vlan_id, int on);
-static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
-		uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-
-/* For Eth VMDQ APIs support */
-static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-		ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
-static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
-		uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-		uint64_t pool_mask,uint8_t vlan_on);
-static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-		struct rte_eth_vmdq_mirror_conf *mirror_conf,
-		uint8_t rule_id, uint8_t on);
-static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
-		uint8_t	rule_id);
-
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-		uint16_t tx_rate, uint64_t q_msk);
-
-static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
-				 struct ether_addr *mac_addr,
-				 uint32_t index, uint32_t pool);
-static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
-			bool add);
-static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter);
-static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op,
-			void *arg);
-static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
-			struct ixgbe_5tuple_filter *filter);
-static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
-			struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *filter,
-			bool add);
-static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
-				enum rte_filter_op filter_op,
-				void *arg);
-static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
-			bool add);
-static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
-				enum rte_filter_op filter_op,
-				void *arg);
-static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter);
-static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg);
-static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
-
-/*
- * Define VF Stats MACRO for Non "cleared on read" register
- */
-#define UPDATE_VF_STAT(reg, last, cur)	                        \
-{                                                               \
-	u32 latest = IXGBE_READ_REG(hw, reg);                   \
-	cur += latest - last;                                   \
-	last = latest;                                          \
-}
-
-#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
-{                                                                \
-	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
-	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
-	u64 latest = ((new_msb << 32) | new_lsb);                \
-	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
-	last = latest;                                           \
-}
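/*
 * Hedged worked example, not part of the patch: the 36-bit VF counters
 * span an lsb and an msb register and are not cleared on read, so the
 * macro above accumulates the delta modulo 2^36.  Adding 2^36 before
 * masking keeps the result correct when the counter wrapped between reads.
 * The helper name is hypothetical.
 */
static uint64_t vf_stat_36bit_delta_sketch(uint64_t last, uint64_t latest)
{
	/* e.g. last = 0xFFFFFFFF0, latest = 0x10 -> delta = 0x20 (wrapped) */
	return (0x1000000000ULL + latest - last) & 0xFFFFFFFFFULL;
}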
-
-#define IXGBE_SET_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
-		(h)->bitmap[idx] |= 1 << bit;\
-	}while(0)
-
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
-		(h)->bitmap[idx] &= ~(1 << bit);\
-	}while(0)
-
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
-		(r) = (h)->bitmap[idx] >> bit & 1;\
-	}while(0)
-
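/*
 * Hedged worked example, not part of the patch: the hwstrip bitmap keeps
 * one bit per RX queue, NBBY (8) bits per byte of each bitmap word.  With
 * 32-bit words, queue 37 lands in word 1, bit 5.  The helper name is
 * hypothetical.
 */
static void hwstrip_index_sketch(void)
{
	uint32_t bitmap[4] = { 0 };
	uint16_t q = 37;
	uint32_t idx = q / (sizeof(bitmap[0]) * 8);	/* 37 / 32 = 1 */
	uint32_t bit = q % (sizeof(bitmap[0]) * 8);	/* 37 % 32 = 5 */

	bitmap[idx] |= 1u << bit;	/* what IXGBE_SET_HWSTRIP does */
}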
-/*
- * The set of PCI devices this driver supports
- */
-static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
-};
-
-
-/*
- * The set of PCI devices this driver supports (for 82599 VF)
- */
-static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
-};
-
-static const struct eth_dev_ops ixgbe_eth_dev_ops = {
-	.dev_configure        = ixgbe_dev_configure,
-	.dev_start            = ixgbe_dev_start,
-	.dev_stop             = ixgbe_dev_stop,
-	.dev_set_link_up    = ixgbe_dev_set_link_up,
-	.dev_set_link_down  = ixgbe_dev_set_link_down,
-	.dev_close            = ixgbe_dev_close,
-	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
-	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
-	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
-	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
-	.link_update          = ixgbe_dev_link_update,
-	.stats_get            = ixgbe_dev_stats_get,
-	.stats_reset          = ixgbe_dev_stats_reset,
-	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
-	.dev_infos_get        = ixgbe_dev_info_get,
-	.mtu_set              = ixgbe_dev_mtu_set,
-	.vlan_filter_set      = ixgbe_vlan_filter_set,
-	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
-	.vlan_offload_set     = ixgbe_vlan_offload_set,
-	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
-	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
-	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
-	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
-	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
-	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
-	.rx_queue_release     = ixgbe_dev_rx_queue_release,
-	.rx_queue_count       = ixgbe_dev_rx_queue_count,
-	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
-	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
-	.tx_queue_release     = ixgbe_dev_tx_queue_release,
-	.dev_led_on           = ixgbe_dev_led_on,
-	.dev_led_off          = ixgbe_dev_led_off,
-	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
-	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
-	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
-	.mac_addr_add         = ixgbe_add_rar,
-	.mac_addr_remove      = ixgbe_remove_rar,
-	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
-	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
-	.mirror_rule_set      = ixgbe_mirror_rule_set,
-	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
-	.set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
-	.set_vf_rx            = ixgbe_set_pool_rx,
-	.set_vf_tx            = ixgbe_set_pool_tx,
-	.set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
-	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
-	.set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
-	.reta_update          = ixgbe_dev_rss_reta_update,
-	.reta_query           = ixgbe_dev_rss_reta_query,
-#ifdef RTE_NIC_BYPASS
-	.bypass_init          = ixgbe_bypass_init,
-	.bypass_state_set     = ixgbe_bypass_state_store,
-	.bypass_state_show    = ixgbe_bypass_state_show,
-	.bypass_event_set     = ixgbe_bypass_event_store,
-	.bypass_event_show    = ixgbe_bypass_event_show,
-	.bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
-	.bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
-	.bypass_ver_show      = ixgbe_bypass_ver_show,
-	.bypass_wd_reset      = ixgbe_bypass_wd_reset,
-#endif /* RTE_NIC_BYPASS */
-	.rss_hash_update      = ixgbe_dev_rss_hash_update,
-	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
-	.filter_ctrl          = ixgbe_dev_filter_ctrl,
-};
-
-/*
- * dev_ops for virtual function; only the bare necessities for basic vf
- * operation have been implemented
- */
-static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
-	.dev_configure        = ixgbevf_dev_configure,
-	.dev_start            = ixgbevf_dev_start,
-	.dev_stop             = ixgbevf_dev_stop,
-	.link_update          = ixgbe_dev_link_update,
-	.stats_get            = ixgbevf_dev_stats_get,
-	.stats_reset          = ixgbevf_dev_stats_reset,
-	.dev_close            = ixgbevf_dev_close,
-	.dev_infos_get        = ixgbevf_dev_info_get,
-	.mtu_set              = ixgbevf_dev_set_mtu,
-	.vlan_filter_set      = ixgbevf_vlan_filter_set,
-	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
-	.vlan_offload_set     = ixgbevf_vlan_offload_set,
-	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
-	.rx_queue_release     = ixgbe_dev_rx_queue_release,
-	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
-	.tx_queue_release     = ixgbe_dev_tx_queue_release,
-	.mac_addr_add         = ixgbevf_add_mac_addr,
-	.mac_addr_remove      = ixgbevf_remove_mac_addr,
-};
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/*
- * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
- */
-static inline int
-ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
-	switch (hw->phy.type) {
-	case ixgbe_phy_sfp_avago:
-	case ixgbe_phy_sfp_ftl:
-	case ixgbe_phy_sfp_intel:
-	case ixgbe_phy_sfp_unknown:
-	case ixgbe_phy_sfp_passive_tyco:
-	case ixgbe_phy_sfp_passive_unknown:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
-static inline int32_t
-ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
-{
-	uint32_t ctrl_ext;
-	int32_t status;
-
-	status = ixgbe_reset_hw(hw);
-
-	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
-	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return status;
-}
-
-static inline void
-ixgbe_enable_intr(struct rte_eth_dev *dev)
-{
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/*
- * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
- */
-static void
-ixgbe_disable_intr(struct ixgbe_hw *hw)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
-	} else {
-		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
-		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
-		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
-	}
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/*
- * This function resets queue statistics mapping registers.
- * From Niantic datasheet, Initialization of Statistics section:
- * "...if software requires the queue counters, the RQSMR and TQSM registers
- * must be re-programmed following a device reset."
- */
-static void
-ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
-{
-	uint32_t i;
-
-	for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
-	}
-}
-
-
-static int
-ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
-				  uint16_t queue_id,
-				  uint8_t stat_idx,
-				  uint8_t is_rx)
-{
-#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
-#define NB_QMAP_FIELDS_PER_QSM_REG 4
-#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
-
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_stat_mapping_registers *stat_mappings =
-		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
-	uint32_t qsmr_mask = 0;
-	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
-	uint32_t q_map;
-	uint8_t n, offset;
-
-	if ((hw->mac.type != ixgbe_mac_82599EB) &&
-		(hw->mac.type != ixgbe_mac_X540) &&
-		(hw->mac.type != ixgbe_mac_X550) &&
-		(hw->mac.type != ixgbe_mac_X550EM_x))
-		return -ENOSYS;
-
-	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
-		     queue_id, stat_idx);
-
-	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
-	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
-		return -EIO;
-	}
-	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
-
-	/* Now clear any previous stat_idx set */
-	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
-	if (!is_rx)
-		stat_mappings->tqsm[n] &= ~clearing_mask;
-	else
-		stat_mappings->rqsmr[n] &= ~clearing_mask;
-
-	q_map = (uint32_t)stat_idx;
-	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
-	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
-	if (!is_rx)
-		stat_mappings->tqsm[n] |= qsmr_mask;
-	else
-		stat_mappings->rqsmr[n] |= qsmr_mask;
-
-	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
-		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
-		     queue_id, stat_idx);
-	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
-		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
-
-	/* Now write the mapping in the appropriate register */
-	if (is_rx) {
-		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
-			     stat_mappings->rqsmr[n], n);
-		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
-	}
-	else {
-		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
-			     stat_mappings->tqsm[n], n);
-		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
-	}
-	return 0;
-}
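/*
 * Hedged worked example, not part of the patch: each RQSMR/TQSM register
 * packs four 8-bit queue-to-stat fields.  Mapping queue 10 to stat index 3
 * therefore touches register n = 2, field offset 2, i.e. bits 23:16.  The
 * helper name is hypothetical.
 */
static uint32_t qstat_map_sketch(void)
{
	uint16_t queue_id = 10;
	uint8_t stat_idx = 3;
	uint8_t n = queue_id / 4;		/* RQSMR/TQSM register 2 */
	uint8_t offset = queue_id % 4;		/* field 2 inside it */
	uint32_t q_map = (uint32_t)stat_idx & 0x0f;

	(void)n;
	return q_map << (8 * offset);		/* 0x00030000 */
}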
-
-static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
-{
-	struct ixgbe_stat_mapping_registers *stat_mappings =
-		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int i;
-
-	/* write whatever was in stat mapping table to the NIC */
-	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
-		/* rx */
-		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
-
-		/* tx */
-		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
-	}
-}
-
-static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
-{
-	uint8_t i;
-	struct ixgbe_dcb_tc_config *tc;
-	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
-
-	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
-	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
-	for (i = 0; i < dcb_max_tc; i++) {
-		tc = &dcb_config->tc_config[i];
-		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
-		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
-				 (uint8_t)(100/dcb_max_tc + (i & 1));
-		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
-				 (uint8_t)(100/dcb_max_tc + (i & 1));
-		tc->pfc = ixgbe_dcb_pfc_disabled;
-	}
-
-	/* Initialize default user to priority mapping, UPx->TC0 */
-	tc = &dcb_config->tc_config[0];
-	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
-	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-	for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
-		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
-		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
-	}
-	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
-	dcb_config->pfc_mode_enable = false;
-	dcb_config->vt_mode = true;
-	dcb_config->round_robin_enable = false;
-	/* support all DCB capabilities in 82599 */
-	dcb_config->support.capabilities = 0xFF;
-
-	/* we only support 4 TCs for X540, X550 */
-	if (hw->mac.type == ixgbe_mac_X540 ||
-		hw->mac.type == ixgbe_mac_X550 ||
-		hw->mac.type == ixgbe_mac_X550EM_x) {
-		dcb_config->num_tcs.pg_tcs = 4;
-		dcb_config->num_tcs.pfc_tcs = 4;
-	}
-}
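/*
 * Hedged worked example, not part of the patch: with 8 traffic classes the
 * per-TC bandwidth group percentages computed above alternate 12/13 so the
 * integer shares still sum to exactly 100.  The helper name is hypothetical.
 */
static int dcb_bwg_percent_sum_sketch(void)
{
	int i, sum = 0;

	for (i = 0; i < 8; i++)
		sum += 100 / 8 + (i & 1);	/* 12, 13, 12, 13, ... */
	return sum;				/* 100 */
}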
-
-/*
- * Ensure that all locks are released before first NVM or PHY access
- */
-static void
-ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
-{
-	uint16_t mask;
-
-	/*
-	 * Phy lock should not fail in this early stage. If this is the case,
-	 * it is due to an improper exit of the application.
-	 * So force the release of the faulty lock. Release of common lock
-	 * is done automatically by swfw_sync function.
-	 */
-	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
-	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
-	}
-	ixgbe_release_swfw_semaphore(hw, mask);
-
-	/*
-	 * These ones are more tricky since they are common to all ports; but
-	 * the swfw_sync retries last long enough (1s) to be almost sure that
-	 * if the lock cannot be taken it is due to an improper lock of the
-	 * semaphore.
-	 */
-	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
-	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
-	}
-	ixgbe_release_swfw_semaphore(hw, mask);
-}
-
-/*
- * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
- * It returns 0 on success.
- */
-static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
-{
-	struct rte_pci_device *pci_dev;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-	struct ixgbe_hwstrip *hwstrip =
-		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
-	struct ixgbe_dcb_config *dcb_config =
-		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
-	uint32_t ctrl_ext;
-	uint16_t csum;
-	int diag, i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
-	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
-
-	/*
-	 * For secondary processes, we don't initialise any further as primary
-	 * has already done this work. Only check we don't need a different
-	 * RX and TX function.
-	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		struct ixgbe_tx_queue *txq;
-		/* TX queue function in primary, set by last queue initialized;
-		 * Tx queue may not be initialized by primary process */
-		if (eth_dev->data->tx_queues) {
-			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
-			ixgbe_set_tx_function(eth_dev, txq);
-		} else {
-			/* Use default TX function if we get here */
-			PMD_INIT_LOG(INFO, "No TX queues configured yet. "
-			                   "Using default TX function.");
-		}
-
-		ixgbe_set_rx_function(eth_dev);
-
-		return 0;
-	}
-	pci_dev = eth_dev->pci_dev;
-
-	/* Vendor and Device ID need to be set before init of shared code */
-	hw->device_id = pci_dev->id.device_id;
-	hw->vendor_id = pci_dev->id.vendor_id;
-	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-	hw->allow_unsupported_sfp = 1;
-
-	/* Initialize the shared code (base driver) */
-#ifdef RTE_NIC_BYPASS
-	diag = ixgbe_bypass_init_shared_code(hw);
-#else
-	diag = ixgbe_init_shared_code(hw);
-#endif /* RTE_NIC_BYPASS */
-
-	if (diag != IXGBE_SUCCESS) {
-		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
-		return -EIO;
-	}
-
-	/* pick up the PCI bus settings for reporting later */
-	ixgbe_get_bus_info(hw);
-
-	/* Unlock any pending hardware semaphore */
-	ixgbe_swfw_lock_reset(hw);
-
-	/* Initialize DCB configuration*/
-	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
-	ixgbe_dcb_init(hw,dcb_config);
-	/* Get Hardware Flow Control setting */
-	hw->fc.requested_mode = ixgbe_fc_full;
-	hw->fc.current_mode = ixgbe_fc_full;
-	hw->fc.pause_time = IXGBE_FC_PAUSE;
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		hw->fc.low_water[i] = IXGBE_FC_LO;
-		hw->fc.high_water[i] = IXGBE_FC_HI;
-	}
-	hw->fc.send_xon = 1;
-
-	/* Make sure we have a good EEPROM before we read from it */
-	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
-	if (diag != IXGBE_SUCCESS) {
-		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
-		return -EIO;
-	}
-
-#ifdef RTE_NIC_BYPASS
-	diag = ixgbe_bypass_init_hw(hw);
-#else
-	diag = ixgbe_init_hw(hw);
-#endif /* RTE_NIC_BYPASS */
-
-	/*
-	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
-	 * is called too soon after the kernel driver unbinding/binding occurs.
-	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
-	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
-	 * also called. See ixgbe_identify_phy_82599(). The reason for the
-	 * failure is not known, and only occurs when virtualisation features
-	 * are disabled in the bios. A delay of 100ms was found to be enough by
-	 * trial-and-error, and is doubled to be safe.
-	 */
-	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
-		rte_delay_ms(200);
-		diag = ixgbe_init_hw(hw);
-	}
-
-	if (diag == IXGBE_ERR_EEPROM_VERSION) {
-		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
-		    "LOM.  Please be aware there may be issues associated "
-		    "with your hardware.");
-		PMD_INIT_LOG(ERR, "If you are experiencing problems "
-		    "please contact your Intel or hardware representative "
-		    "who provided you with this hardware.");
-	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
-	if (diag) {
-		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
-		return -EIO;
-	}
-
-	/* disable interrupt */
-	ixgbe_disable_intr(hw);
-
-	/* reset mappings for queue statistics hw counters*/
-	ixgbe_reset_qstat_mappings(hw);
-
-	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
-	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
-		return -ENOMEM;
-	}
-	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-			&eth_dev->data->mac_addrs[0]);
-
-	/* Allocate memory for storing hash filter MAC addresses */
-	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			IXGBE_VMDQ_NUM_UC_MAC, 0);
-	if (eth_dev->data->hash_mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC addresses",
-			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
-		return -ENOMEM;
-	}
-
-	/* initialize the vfta */
-	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
-
-	/* initialize the hw strip bitmap*/
-	memset(hwstrip, 0, sizeof(*hwstrip));
-
-	/* initialize PF if max_vfs not zero */
-	ixgbe_pf_host_init(eth_dev);
-
-	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	/* let hardware know driver is loaded */
-	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
-	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
-	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
-	IXGBE_WRITE_FLUSH(hw);
-
-	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
-			     (int) hw->mac.type, (int) hw->phy.type,
-			     (int) hw->phy.sfp_type);
-	else
-		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
-			     (int) hw->mac.type, (int) hw->phy.type);
-
-	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
-
-	rte_intr_callback_register(&(pci_dev->intr_handle),
-		ixgbe_dev_interrupt_handler, (void *)eth_dev);
-
-	/* enable uio intr after callback register */
-	rte_intr_enable(&(pci_dev->intr_handle));
-
-	/* enable support intr */
-	ixgbe_enable_intr(eth_dev);
-
-	/* initialize 5tuple filter list */
-	TAILQ_INIT(&filter_info->fivetuple_list);
-	memset(filter_info->fivetuple_mask, 0,
-		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
-	return 0;
-}
-
-
-/*
- * Negotiate mailbox API version with the PF.
- * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
- * Then we try to negotiate starting with the most recent one.
- * If all negotiation attempts fail, then we will proceed with
- * the default one (ixgbe_mbox_api_10).
- */
-static void
-ixgbevf_negotiate_api(struct ixgbe_hw *hw)
-{
-	int32_t i;
-
-	/* start with highest supported, proceed down */
-	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
-		ixgbe_mbox_api_11,
-		ixgbe_mbox_api_10,
-	};
-
-	for (i = 0;
-			i != RTE_DIM(sup_ver) &&
-			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
-			i++)
-		;
-}
-
-static void
-generate_random_mac_addr(struct ether_addr *mac_addr)
-{
-	uint64_t random;
-
-	/* Set Organizationally Unique Identifier (OUI) prefix. */
-	mac_addr->addr_bytes[0] = 0x00;
-	mac_addr->addr_bytes[1] = 0x09;
-	mac_addr->addr_bytes[2] = 0xC0;
-	/* Force indication of locally assigned MAC address. */
-	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
-	/* Generate the last 3 bytes of the MAC address with a random number. */
-	random = rte_rand();
-	memcpy(&mac_addr->addr_bytes[3], &random, 3);
-}
-
-/*
- * Virtual Function device init
- */
-static int
-eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
-{
-	int diag;
-	uint32_t tc, tcs;
-	struct rte_pci_device *pci_dev;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-	struct ixgbe_hwstrip *hwstrip =
-		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
-	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
-
-	PMD_INIT_FUNC_TRACE();
-
-	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
-	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
-
-	/* for secondary processes, we don't initialise any further as primary
-	 * has already done this work. Only check we don't need a different
-	 * RX function */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (eth_dev->data->scattered_rx)
-			eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
-		return 0;
-	}
-
-	pci_dev = eth_dev->pci_dev;
-
-	hw->device_id = pci_dev->id.device_id;
-	hw->vendor_id = pci_dev->id.vendor_id;
-	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-
-	/* initialize the vfta */
-	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
-
-	/* initialize the hw strip bitmap*/
-	memset(hwstrip, 0, sizeof(*hwstrip));
-
-	/* Initialize the shared code (base driver) */
-	diag = ixgbe_init_shared_code(hw);
-	if (diag != IXGBE_SUCCESS) {
-		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
-		return -EIO;
-	}
-
-	/* init_mailbox_params */
-	hw->mbx.ops.init_params(hw);
-
-	/* Disable the interrupts for VF */
-	ixgbevf_intr_disable(hw);
-
-	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
-	diag = hw->mac.ops.reset_hw(hw);
-
-	/*
-	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
-	 * the underlying PF driver has not assigned a MAC address to the VF.
-	 * In this case, assign a random MAC address.
-	 */
-	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
-		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-		return (diag);
-	}
-
-	/* negotiate mailbox API version to use with the PF. */
-	ixgbevf_negotiate_api(hw);
-
-	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
-	ixgbevf_get_queues(hw, &tcs, &tc);
-
-	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
-	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
-		return -ENOMEM;
-	}
-
-	/* Generate a random MAC address, if none was assigned by PF. */
-	if (is_zero_ether_addr(perm_addr)) {
-		generate_random_mac_addr(perm_addr);
-		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
-		if (diag) {
-			rte_free(eth_dev->data->mac_addrs);
-			eth_dev->data->mac_addrs = NULL;
-			return diag;
-		}
-		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
-		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
-			     "%02x:%02x:%02x:%02x:%02x:%02x",
-			     perm_addr->addr_bytes[0],
-			     perm_addr->addr_bytes[1],
-			     perm_addr->addr_bytes[2],
-			     perm_addr->addr_bytes[3],
-			     perm_addr->addr_bytes[4],
-			     perm_addr->addr_bytes[5]);
-	}
-
-	/* Copy the permanent MAC address */
-	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
-
-	/* reset the hardware with the new settings */
-	diag = hw->mac.ops.start_hw(hw);
-	switch (diag) {
-		case  0:
-			break;
-
-		default:
-			PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-			return (-EIO);
-	}
-
-	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
-		     eth_dev->data->port_id, pci_dev->id.vendor_id,
-		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
-
-	return 0;
-}
-
-static struct eth_driver rte_ixgbe_pmd = {
-	{
-		.name = "rte_ixgbe_pmd",
-		.id_table = pci_id_ixgbe_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-	},
-	.eth_dev_init = eth_ixgbe_dev_init,
-	.dev_private_size = sizeof(struct ixgbe_adapter),
-};
-
-/*
- * virtual function driver struct
- */
-static struct eth_driver rte_ixgbevf_pmd = {
-	{
-		.name = "rte_ixgbevf_pmd",
-		.id_table = pci_id_ixgbevf_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-	},
-	.eth_dev_init = eth_ixgbevf_dev_init,
-	.dev_private_size = sizeof(struct ixgbe_adapter),
-};
-
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_ixgbe_pmd);
-	return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_ixgbevf_pmd);
-	return (0);
-}
-
-static int
-ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-	uint32_t vfta;
-	uint32_t vid_idx;
-	uint32_t vid_bit;
-
-	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
-	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
-	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
-	if (on)
-		vfta |= vid_bit;
-	else
-		vfta &= ~vid_bit;
-	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
-
-	/* update local VFTA copy */
-	shadow_vfta->vfta[vid_idx] = vfta;
-
-	return 0;
-}
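/*
 * Hedged worked example, not part of the patch: the VLAN filter table is
 * 128 32-bit VFTA registers, one bit per VLAN ID.  VLAN 100 therefore maps
 * to VFTA register 3, bit 4, exactly as computed above.  The helper name
 * is hypothetical.
 */
static void vfta_index_sketch(void)
{
	uint16_t vlan_id = 100;
	uint32_t vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);	/* 100 >> 5 = 3 */
	uint32_t vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));	/* 1 << 4 = 0x10 */

	(void)vid_idx;
	(void)vid_bit;
}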
-
-static void
-ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
-	if (on)
-		ixgbe_vlan_hw_strip_enable(dev, queue);
-	else
-		ixgbe_vlan_hw_strip_disable(dev, queue);
-}
-
-static void
-ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Only the high 16 bits are valid */
-	IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
-}
-
-void
-ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t vlnctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Filter Table Disable */
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl &= ~IXGBE_VLNCTRL_VFE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-void
-ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-	uint32_t vlnctrl;
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Filter Table Enable */
-	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-	vlnctrl |= IXGBE_VLNCTRL_VFE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-
-	/* write whatever is in local vfta copy */
-	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
-}
-
-static void
-ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
-{
-	struct ixgbe_hwstrip *hwstrip =
-		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
-
-	if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
-		return;
-
-	if (on)
-		IXGBE_SET_HWSTRIP(hwstrip, queue);
-	else
-		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
-}
-
-static void
-ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		/* No queue level support */
-		PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
-		return;
-	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-		ctrl &= ~IXGBE_RXDCTL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-	}
-	/* record the HW strip setting for this queue */
-	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
-}
-
-static void
-ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		/* No queue level supported */
-		PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
-		return;
-	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-		ctrl |= IXGBE_RXDCTL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-	}
-	/* record the HW strip setting for this queue */
-	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
-}
-
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl &= ~IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
-			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
-
-			/* record the HW strip setting for this queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
-		}
-	}
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		ctrl |= IXGBE_VLNCTRL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
-
-			/* record the HW strip setting for this queue */
-			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
-		}
-	}
-}
-
-static void
-ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* DMATXCTL: Generic Double VLAN Disable */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-	ctrl &= ~IXGBE_DMATXCTL_GDV;
-	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
-
-	/* CTRL_EXT: Global Double VLAN Disable */
-	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	ctrl &= ~IXGBE_EXTENDED_VLAN;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
-
-}
-
-static void
-ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* DMATXCTL: Generic Double VLAN Enable */
-	ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-	ctrl |= IXGBE_DMATXCTL_GDV;
-	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
-
-	/* CTRL_EXT: Global Double VLAN Enable */
-	ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	ctrl |= IXGBE_EXTENDED_VLAN;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
-
-	/*
-	 * VET EXT field in the EXVET register = 0x8100 by default
-	 * So no need to change. Same to VT field of DMATXCTL register
-	 */
-}
-
-static void
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
-{
-	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-			ixgbe_vlan_hw_strip_enable_all(dev);
-		else
-			ixgbe_vlan_hw_strip_disable_all(dev);
-	}
-
-	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
-			ixgbe_vlan_hw_filter_enable(dev);
-		else
-			ixgbe_vlan_hw_filter_disable(dev);
-	}
-
-	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
-			ixgbe_vlan_hw_extend_enable(dev);
-		else
-			ixgbe_vlan_hw_extend_disable(dev);
-	}
-}
-
-static void
-ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
-	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-}
-
-static int
-ixgbe_dev_configure(struct rte_eth_dev *dev)
-{
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* set flag to update link status after init */
-	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-
-	/*
-	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
-	 * allocation or vector Rx preconditions we will reset it.
-	 */
-	adapter->rx_bulk_alloc_allowed = true;
-	adapter->rx_vec_allowed = true;
-
-	return 0;
-}
-
-/*
- * Configure device link speed and setup link.
- * It returns 0 on success.
- */
-static int
-ixgbe_dev_start(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	int err, link_up = 0, negotiate = 0;
-	uint32_t speed = 0;
-	int mask = 0;
-	int status;
-	uint16_t vf, idx;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* IXGBE devices don't support half duplex */
-	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
-			(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
-			     dev->data->dev_conf.link_duplex,
-			     dev->data->port_id);
-		return -EINVAL;
-	}
-
-	/* stop adapter */
-	hw->adapter_stopped = FALSE;
-	ixgbe_stop_adapter(hw);
-
-	/* reinitialize adapter
-	 * this calls reset and start */
-	status = ixgbe_pf_reset_hw(hw);
-	if (status != 0)
-		return -1;
-	hw->mac.ops.start_hw(hw);
-	hw->mac.get_link_status = true;
-
-	/* configure PF module if SRIOV enabled */
-	ixgbe_pf_host_configure(dev);
-
-	/* initialize transmission unit */
-	ixgbe_dev_tx_init(dev);
-
-	/* This can fail when allocating mbufs for descriptor rings */
-	err = ixgbe_dev_rx_init(dev);
-	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
-		goto error;
-	}
-
-	err = ixgbe_dev_rxtx_start(dev);
-	if (err < 0) {
-		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
-		goto error;
-	}
-
-	/* Skip link setup if loopback mode is enabled for 82599. */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-		goto skip_link_setup;
-
-	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
-		err = hw->mac.ops.setup_sfp(hw);
-		if (err)
-			goto error;
-	}
-
-	/* Turn on the laser */
-	ixgbe_enable_tx_laser(hw);
-
-	err = ixgbe_check_link(hw, &speed, &link_up, 0);
-	if (err)
-		goto error;
-	dev->data->dev_link.link_status = link_up;
-
-	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
-	if (err)
-		goto error;
-
-	switch(dev->data->dev_conf.link_speed) {
-	case ETH_LINK_SPEED_AUTONEG:
-		speed = (hw->mac.type != ixgbe_mac_82598EB) ?
-				IXGBE_LINK_SPEED_82599_AUTONEG :
-				IXGBE_LINK_SPEED_82598_AUTONEG;
-		break;
-	case ETH_LINK_SPEED_100:
-		/*
-		 * Invalid for 82598 but error will be detected by
-		 * ixgbe_setup_link()
-		 */
-		speed = IXGBE_LINK_SPEED_100_FULL;
-		break;
-	case ETH_LINK_SPEED_1000:
-		speed = IXGBE_LINK_SPEED_1GB_FULL;
-		break;
-	case ETH_LINK_SPEED_10000:
-		speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	default:
-		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
-			     dev->data->dev_conf.link_speed,
-			     dev->data->port_id);
-		goto error;
-	}
-
-	err = ixgbe_setup_link(hw, speed, link_up);
-	if (err)
-		goto error;
-
-skip_link_setup:
-
-	/* check if lsc interrupt is enabled */
-	if (dev->data->dev_conf.intr_conf.lsc != 0)
-		ixgbe_dev_lsc_interrupt_setup(dev);
-
-	/* resume enabled intr since hw reset */
-	ixgbe_enable_intr(dev);
-
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-		ETH_VLAN_EXTEND_MASK;
-	ixgbe_vlan_offload_set(dev, mask);
-
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-		/* Enable vlan filtering for VMDq */
-		ixgbe_vmdq_vlan_hw_filter_enable(dev);
-	}
-
-	/* Configure DCB hw */
-	ixgbe_configure_dcb(dev);
-
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
-		err = ixgbe_fdir_configure(dev);
-		if (err)
-			goto error;
-	}
-
-	/* Restore vf rate limit */
-	if (vfinfo != NULL) {
-		for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
-			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
-				if (vfinfo[vf].tx_rate[idx] != 0)
-					ixgbe_set_vf_rate_limit(dev, vf,
-						vfinfo[vf].tx_rate[idx],
-						1 << idx);
-	}
-
-	ixgbe_restore_statistics_mapping(dev);
-
-	return (0);
-
-error:
-	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
-	ixgbe_dev_clear_queues(dev);
-	return -EIO;
-}
-
-/*
- * Stop device: disable rx and tx functions to allow for reconfiguring.
- */
-static void
-ixgbe_dev_stop(struct rte_eth_dev *dev)
-{
-	struct rte_eth_link link;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
-	int vf;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* disable interrupts */
-	ixgbe_disable_intr(hw);
-
-	/* reset the NIC */
-	ixgbe_pf_reset_hw(hw);
-	hw->adapter_stopped = FALSE;
-
-	/* stop adapter */
-	ixgbe_stop_adapter(hw);
-
-	for (vf = 0; vfinfo != NULL &&
-		     vf < dev->pci_dev->max_vfs; vf++)
-		vfinfo[vf].clear_to_send = false;
-
-	/* Turn off the laser */
-	ixgbe_disable_tx_laser(hw);
-
-	ixgbe_dev_clear_queues(dev);
-
-	/* Clear stored conf */
-	dev->data->scattered_rx = 0;
-	dev->data->lro = 0;
-
-	/* Clear recorded link status */
-	memset(&link, 0, sizeof(link));
-	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-
-	/* Remove all ntuple filters of the device */
-	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
-	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
-		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
-		TAILQ_REMOVE(&filter_info->fivetuple_list,
-			     p_5tuple, entries);
-		rte_free(p_5tuple);
-	}
-	memset(filter_info->fivetuple_mask, 0,
-		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
-}
-
-/*
- * Set device link up: enable tx laser.
- */
-static int
-ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
-		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-			/* Not supported in bypass mode */
-			PMD_INIT_LOG(ERR, "Set link up is not supported "
-				     "by device id 0x%x", hw->device_id);
-			return -ENOTSUP;
-		}
-#endif
-		/* Turn on the laser */
-		ixgbe_enable_tx_laser(hw);
-		return 0;
-	}
-
-	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
-		     hw->device_id);
-	return -ENOTSUP;
-}
-
-/*
- * Set device link down: disable tx laser.
- */
-static int
-ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-#ifdef RTE_NIC_BYPASS
-		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
-			/* Not supported in bypass mode */
-			PMD_INIT_LOG(ERR, "Set link down is not supported "
-				     "by device id 0x%x", hw->device_id);
-			return -ENOTSUP;
-		}
-#endif
-		/* Turn off the laser */
-		ixgbe_disable_tx_laser(hw);
-		return 0;
-	}
-
-	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
-		     hw->device_id);
-	return -ENOTSUP;
-}
-
-/*
- * Reset and stop device.
- */
-static void
-ixgbe_dev_close(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-
-	ixgbe_pf_reset_hw(hw);
-
-	ixgbe_dev_stop(dev);
-	hw->adapter_stopped = 1;
-
-	ixgbe_disable_pcie_master(hw);
-
-	/* reprogram the RAR[0] in case user changed it. */
-	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-}
-
-/*
- * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
- */
-static void
-ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
-	struct ixgbe_hw *hw =
-			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_stats *hw_stats =
-			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-	uint32_t bprc, lxon, lxoff, total;
-	uint64_t total_missed_rx, total_qbrc, total_qprc;
-	unsigned i;
-
-	total_missed_rx = 0;
-	total_qbrc = 0;
-	total_qprc = 0;
-
-	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
-	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
-	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
-
-	for (i = 0; i < 8; i++) {
-		uint32_t mp;
-		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
-		/* global total per queue */
-		hw_stats->mpc[i] += mp;
-		/* Running comprehensive total for stats display */
-		total_missed_rx += hw_stats->mpc[i];
-		if (hw->mac.type == ixgbe_mac_82598EB)
-			hw_stats->rnbc[i] +=
-			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
-		hw_stats->pxontxc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-		hw_stats->pxonrxc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-		hw_stats->pxofftxc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-		hw_stats->pxoffrxc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
-		hw_stats->pxon2offc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
-	}
-	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
-		hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-		hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
-		hw_stats->qbrc[i] +=
-		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
-		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
-		hw_stats->qbtc[i] +=
-		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
-		hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-
-		total_qprc += hw_stats->qprc[i];
-		total_qbrc += hw_stats->qbrc[i];
-	}
-	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
-	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
-	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
-
-	/* Note that gprc counts missed packets */
-	hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
-
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
-		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
-		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
-		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-	} else {
-		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-		/* 82598 only has a counter in the high register */
-		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
-	}
-
-	/*
-	 * Workaround: mprc hardware is incorrectly counting
-	 * broadcasts, so for now we subtract those.
-	 */
-	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
-	hw_stats->bprc += bprc;
-	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		hw_stats->mprc -= bprc;
-
-	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
-	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
-	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
-	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
-	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
-	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
-	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-	hw_stats->lxontxc += lxon;
-	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-	hw_stats->lxofftxc += lxoff;
-	total = lxon + lxoff;
-
-	hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
-	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
-	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
-	hw_stats->gptc -= total;
-	hw_stats->mptc -= total;
-	hw_stats->ptc64 -= total;
-	hw_stats->gotc -= total * ETHER_MIN_LEN;
-
-	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
-	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
-	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
-	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
-	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
-	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
-	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
-	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
-	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
-	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
-	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
-	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
-	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
-	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
-	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
-	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
-	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
-	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
-	/* Only read FCOE on 82599 */
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
-		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
-		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
-		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
-		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
-	}
-
-	if (stats == NULL)
-		return;
-
-	/* Fill out the rte_eth_stats statistics structure */
-	stats->ipackets = total_qprc;
-	stats->ibytes = total_qbrc;
-	stats->opackets = hw_stats->gptc;
-	stats->obytes = hw_stats->gotc;
-	stats->imcasts = hw_stats->mprc;
-
-	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
-		stats->q_ipackets[i] = hw_stats->qprc[i];
-		stats->q_opackets[i] = hw_stats->qptc[i];
-		stats->q_ibytes[i] = hw_stats->qbrc[i];
-		stats->q_obytes[i] = hw_stats->qbtc[i];
-		stats->q_errors[i] = hw_stats->qprdc[i];
-	}
-
-	/* Rx Errors */
-	stats->ibadcrc  = hw_stats->crcerrs;
-	stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
-	stats->imissed  = total_missed_rx;
-	stats->ierrors  = stats->ibadcrc +
-	                  stats->ibadlen +
-	                  stats->imissed +
-	                  hw_stats->illerrc + hw_stats->errbc;
-
-	/* Tx Errors */
-	stats->oerrors  = 0;
-
-	/* XON/XOFF pause frames */
-	stats->tx_pause_xon  = hw_stats->lxontxc;
-	stats->rx_pause_xon  = hw_stats->lxonrxc;
-	stats->tx_pause_xoff = hw_stats->lxofftxc;
-	stats->rx_pause_xoff = hw_stats->lxoffrxc;
-
-	/* Flow Director Stats registers */
-	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-	stats->fdirmatch = hw_stats->fdirmatch;
-	stats->fdirmiss = hw_stats->fdirmiss;
-}
-
-static void
-ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw_stats *stats =
-			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
-	/* HW registers are cleared on read */
-	ixgbe_dev_stats_get(dev, NULL);
-
-	/* Reset software totals */
-	memset(stats, 0, sizeof(*stats));
-}
-
-static void
-ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
-			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
-	/* Good Rx packet, include VF loopback */
-	UPDATE_VF_STAT(IXGBE_VFGPRC,
-	    hw_stats->last_vfgprc, hw_stats->vfgprc);
-
-	/* Good Rx octets, include VF loopback */
-	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
-	    hw_stats->last_vfgorc, hw_stats->vfgorc);
-
-	/* Good Tx packet, include VF loopback */
-	UPDATE_VF_STAT(IXGBE_VFGPTC,
-	    hw_stats->last_vfgptc, hw_stats->vfgptc);
-
-	/* Good Tx octets, include VF loopback */
-	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
-	    hw_stats->last_vfgotc, hw_stats->vfgotc);
-
-	/* Rx Multicast Packet */
-	UPDATE_VF_STAT(IXGBE_VFMPRC,
-	    hw_stats->last_vfmprc, hw_stats->vfmprc);
-
-	if (stats == NULL)
-		return;
-
-	stats->ipackets = hw_stats->vfgprc;
-	stats->ibytes = hw_stats->vfgorc;
-	stats->opackets = hw_stats->vfgptc;
-	stats->obytes = hw_stats->vfgotc;
-	stats->imcasts = hw_stats->vfmprc;
-}
-
-static void
-ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
-{
-	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
-			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
-
-	/* Sync HW register to the last stats */
-	ixgbevf_dev_stats_get(dev, NULL);
-
-	/* reset HW current stats*/
-	hw_stats->vfgprc = 0;
-	hw_stats->vfgorc = 0;
-	hw_stats->vfgptc = 0;
-	hw_stats->vfgotc = 0;
-	hw_stats->vfmprc = 0;
-
-}
-
-static void
-ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
-	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
-	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
-	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
-	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
-	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
-	dev_info->max_vfs = dev->pci_dev->max_vfs;
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
-	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM  |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-
-	/*
-	 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
-	 * mode.
-	 */
-	if ((hw->mac.type == ixgbe_mac_82599EB ||
-	     hw->mac.type == ixgbe_mac_X540) &&
-	    !RTE_ETH_DEV_SRIOV(dev).active)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	dev_info->default_rxconf = (struct rte_eth_rxconf) {
-		.rx_thresh = {
-			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
-			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
-			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
-		},
-		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
-		.rx_drop_en = 0,
-	};
-
-	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.tx_thresh = {
-			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
-			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
-			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
-		},
-		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
-		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
-		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
-	};
-	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
-	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
-}
-
-static void
-ixgbevf_dev_info_get(struct rte_eth_dev *dev,
-		     struct rte_eth_dev_info *dev_info)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
-	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
-	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
-	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
-	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
-	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
-	dev_info->max_vfs = dev->pci_dev->max_vfs;
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		dev_info->max_vmdq_pools = ETH_16_POOLS;
-	else
-		dev_info->max_vmdq_pools = ETH_64_POOLS;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM;
-
-	dev_info->default_rxconf = (struct rte_eth_rxconf) {
-		.rx_thresh = {
-			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
-			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
-			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
-		},
-		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
-		.rx_drop_en = 0,
-	};
-
-	dev_info->default_txconf = (struct rte_eth_txconf) {
-		.tx_thresh = {
-			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
-			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
-			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
-		},
-		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
-		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
-		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
-	};
-}
-
-/* return 0 means link status changed, -1 means not changed */
-static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_link link, old;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
-	int link_up;
-	int diag;
-
-	link.link_status = 0;
-	link.link_speed = 0;
-	link.link_duplex = 0;
-	memset(&old, 0, sizeof(old));
-	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
-
-	/* check if it needs to wait to complete, if lsc interrupt is enabled */
-	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
-		diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
-	else
-		diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
-	if (diag != 0) {
-		link.link_speed = ETH_LINK_SPEED_100;
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-		if (link.link_status == old.link_status)
-			return -1;
-		return 0;
-	}
-
-	if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
-	    !hw->mac.get_link_status) {
-		memcpy(&link, &old, sizeof(link));
-		return -1;
-	}
-
-	if (link_up == 0) {
-		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-		if (link.link_status == old.link_status)
-			return -1;
-		return 0;
-	}
-	link.link_status = 1;
-	link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
-	switch (link_speed) {
-	default:
-	case IXGBE_LINK_SPEED_UNKNOWN:
-		link.link_duplex = ETH_LINK_HALF_DUPLEX;
-		link.link_speed = ETH_LINK_SPEED_100;
-		break;
-
-	case IXGBE_LINK_SPEED_100_FULL:
-		link.link_speed = ETH_LINK_SPEED_100;
-		break;
-
-	case IXGBE_LINK_SPEED_1GB_FULL:
-		link.link_speed = ETH_LINK_SPEED_1000;
-		break;
-
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		link.link_speed = ETH_LINK_SPEED_10000;
-		break;
-	}
-	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-
-	if (link.link_status == old.link_status)
-		return -1;
-
-	return 0;
-}
-
-static void
-ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t fctrl;
-
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-}
-
-static void
-ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t fctrl;
-
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl &= (~IXGBE_FCTRL_UPE);
-	if (dev->data->all_multicast == 1)
-		fctrl |= IXGBE_FCTRL_MPE;
-	else
-		fctrl &= (~IXGBE_FCTRL_MPE);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-}
-
-static void
-ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t fctrl;
-
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl |= IXGBE_FCTRL_MPE;
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-}
-
-static void
-ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t fctrl;
-
-	if (dev->data->promiscuous == 1)
-		return; /* must remain in all_multicast mode */
-
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl &= (~IXGBE_FCTRL_MPE);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-}
-
-/**
- * It clears the interrupt causes and enables the interrupt.
- * It will be called only once during NIC initialization.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
-{
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-	ixgbe_dev_link_status_print(dev);
-	intr->mask |= IXGBE_EICR_LSC;
-
-	return 0;
-}
-
-/*
- * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
-{
-	uint32_t eicr;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-	/* clear all cause mask */
-	ixgbe_disable_intr(hw);
-
-	/* read-on-clear nic registers here */
-	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-	PMD_DRV_LOG(INFO, "eicr %x", eicr);
-
-	intr->flags = 0;
-	if (eicr & IXGBE_EICR_LSC) {
-		/* set flag for async link update */
-		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-	}
-
-	if (eicr & IXGBE_EICR_MAILBOX)
-		intr->flags |= IXGBE_FLAG_MAILBOX;
-
-	return 0;
-}
-
-/**
- * It gets and then prints the link status.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  void
- */
-static void
-ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
-{
-	struct rte_eth_link link;
-
-	memset(&link, 0, sizeof(link));
-	rte_ixgbe_dev_atomic_read_link_status(dev, &link);
-	if (link.link_status) {
-		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
-					(int)(dev->data->port_id),
-					(unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-					"full-duplex" : "half-duplex");
-	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down",
-				(int)(dev->data->port_id));
-	}
-	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-				dev->pci_dev->addr.domain,
-				dev->pci_dev->addr.bus,
-				dev->pci_dev->addr.devid,
-				dev->pci_dev->addr.function);
-}
-
-/*
- * It executes link_update after knowing an interrupt occurred.
- *
- * @param dev
- *  Pointer to struct rte_eth_dev.
- *
- * @return
- *  - On success, zero.
- *  - On failure, a negative value.
- */
-static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
-{
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	int64_t timeout;
-	struct rte_eth_link link;
-	int intr_enable_delay = false;
-
-	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
-
-	if (intr->flags & IXGBE_FLAG_MAILBOX) {
-		ixgbe_pf_mbx_process(dev);
-		intr->flags &= ~IXGBE_FLAG_MAILBOX;
-	}
-
-	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
-		/* get the link status before link update, for predicting later */
-		memset(&link, 0, sizeof(link));
-		rte_ixgbe_dev_atomic_read_link_status(dev, &link);
-
-		ixgbe_dev_link_update(dev, 0);
-
-		/* link was down, so it is likely coming up */
-		if (!link.link_status)
-			/* handle it 1 sec later, waiting for it to become stable */
-			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
-		/* link was up, so it is likely going down */
-		else
-			/* handle it 4 sec later, waiting for it to become stable */
-			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
-		ixgbe_dev_link_status_print(dev);
-
-		intr_enable_delay = true;
-	}
-
-	if (intr_enable_delay) {
-		if (rte_eal_alarm_set(timeout * 1000,
-				      ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
-			PMD_DRV_LOG(ERR, "Error setting alarm");
-	} else {
-		PMD_DRV_LOG(DEBUG, "enable intr immediately");
-		ixgbe_enable_intr(dev);
-		rte_intr_enable(&(dev->pci_dev->intr_handle));
-	}
-
-
-	return 0;
-}
-
-/**
- * Interrupt handler registered as an alarm callback for delayed handling of a
- * specific interrupt, waiting for the NIC state to become stable. The ixgbe
- * interrupt state is not stable right after the link goes down, so it needs
- * to wait 4 seconds before a stable status can be read.
- *
- * @param handle
- *  Pointer to interrupt handle.
- * @param param
- *  The address of parameter (struct rte_eth_dev *) registered before.
- *
- * @return
- *  void
- */
-static void
-ixgbe_dev_interrupt_delayed_handler(void *param)
-{
-	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t eicr;
-
-	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-	if (eicr & IXGBE_EICR_MAILBOX)
-		ixgbe_pf_mbx_process(dev);
-
-	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
-		ixgbe_dev_link_update(dev, 0);
-		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-		ixgbe_dev_link_status_print(dev);
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
-	}
-
-	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
-	ixgbe_enable_intr(dev);
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
-}
-
-/**
- * Interrupt handler triggered by NIC for handling
- * specific interrupt.
- *
- * @param handle
- *  Pointer to interrupt handle.
- * @param param
- *  The address of parameter (struct rte_eth_dev *) registered before.
- *
- * @return
- *  void
- */
-static void
-ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
-							void *param)
-{
-	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-	ixgbe_dev_interrupt_get_status(dev);
-	ixgbe_dev_interrupt_action(dev);
-}
-
-static int
-ixgbe_dev_led_on(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-ixgbe_dev_led_off(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
-}
-
-static int
-ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
-	struct ixgbe_hw *hw;
-	uint32_t mflcn_reg;
-	uint32_t fccfg_reg;
-	int rx_pause;
-	int tx_pause;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	fc_conf->pause_time = hw->fc.pause_time;
-	fc_conf->high_water = hw->fc.high_water[0];
-	fc_conf->low_water = hw->fc.low_water[0];
-	fc_conf->send_xon = hw->fc.send_xon;
-	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
-
-	/*
-	 * Return rx_pause status according to actual setting of
-	 * MFLCN register.
-	 */
-	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
-		rx_pause = 1;
-	else
-		rx_pause = 0;
-
-	/*
-	 * Return tx_pause status according to actual setting of
-	 * FCCFG register.
-	 */
-	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-	if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
-		tx_pause = 1;
-	else
-		tx_pause = 0;
-
-	if (rx_pause && tx_pause)
-		fc_conf->mode = RTE_FC_FULL;
-	else if (rx_pause)
-		fc_conf->mode = RTE_FC_RX_PAUSE;
-	else if (tx_pause)
-		fc_conf->mode = RTE_FC_TX_PAUSE;
-	else
-		fc_conf->mode = RTE_FC_NONE;
-
-	return 0;
-}
-
-static int
-ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
-	struct ixgbe_hw *hw;
-	int err;
-	uint32_t rx_buf_size;
-	uint32_t max_high_water;
-	uint32_t mflcn;
-	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
-		ixgbe_fc_none,
-		ixgbe_fc_rx_pause,
-		ixgbe_fc_tx_pause,
-		ixgbe_fc_full
-	};
-
-	PMD_INIT_FUNC_TRACE();
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
-		return -ENOTSUP;
-	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
-
-	/*
-	 * At least reserve one Ethernet frame for watermark
-	 * high_water/low_water in kilo bytes for ixgbe
-	 */
-	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
-	if ((fc_conf->high_water > max_high_water) ||
-		(fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
-		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-		return (-EINVAL);
-	}
-
-	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
-	hw->fc.pause_time     = fc_conf->pause_time;
-	hw->fc.high_water[0]  = fc_conf->high_water;
-	hw->fc.low_water[0]   = fc_conf->low_water;
-	hw->fc.send_xon       = fc_conf->send_xon;
-
-	err = ixgbe_fc_enable(hw);
-
-	/* Not negotiated is not an error case */
-	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
-
-		/* check if we want to forward MAC frames - driver doesn't have native
-		 * capability to do that, so we'll write the registers ourselves */
-
-		mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-
-		/* set or clear MFLCN.PMCF bit depending on configuration */
-		if (fc_conf->mac_ctrl_frame_fwd != 0)
-			mflcn |= IXGBE_MFLCN_PMCF;
-		else
-			mflcn &= ~IXGBE_MFLCN_PMCF;
-
-		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
-		IXGBE_WRITE_FLUSH(hw);
-
-		return 0;
-	}
-
-	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
-	return -EIO;
-}
-
-/**
- *  ixgbe_pfc_enable_generic - Enable flow control
- *  @hw: pointer to hardware structure
- *  @tc_num: traffic class number
- *  Enable flow control according to the current settings.
- */
-static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
-{
-	int ret_val = 0;
-	uint32_t mflcn_reg, fccfg_reg;
-	uint32_t reg;
-	uint32_t fcrtl, fcrth;
-	uint8_t i;
-	uint8_t nb_rx_en;
-
-	/* Validate the water mark configuration */
-	if (!hw->fc.pause_time) {
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/* Low water mark of zero causes XOFF floods */
-	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		 /* High/Low water can not be 0 */
-		if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
-			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-			goto out;
-		}
-
-		if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
-			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-			goto out;
-		}
-	}
-	/* Negotiate the fc mode to use */
-	ixgbe_fc_autoneg(hw);
-
-	/* Disable any previous flow control settings */
-	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
-
-	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
-
-	switch (hw->fc.current_mode) {
-	case ixgbe_fc_none:
-		/*
-		 * If more than one RX Priority Flow Control is enabled,
-		 * then TX pause cannot be disabled.
-		 */
-		nb_rx_en = 0;
-		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
-			if (reg & IXGBE_FCRTH_FCEN)
-				nb_rx_en++;
-		}
-		if (nb_rx_en > 1)
-			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		mflcn_reg |= IXGBE_MFLCN_RPFCE;
-		/*
-		 * If more than one RX Priority Flow Control is enabled,
-		 * then TX pause cannot be disabled.
-		 */
-		nb_rx_en = 0;
-		for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
-			if (reg & IXGBE_FCRTH_FCEN)
-				nb_rx_en++;
-		}
-		if (nb_rx_en > 1)
-			fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		mflcn_reg |= IXGBE_MFLCN_RPFCE;
-		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
-		break;
-	default:
-		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-		break;
-	}
-
-	/* Set 802.3x based flow control settings. */
-	mflcn_reg |= IXGBE_MFLCN_DPF;
-	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
-	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
-
-	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
-	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
-		hw->fc.high_water[tc_num]) {
-		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
-		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
-	} else {
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
-		/*
-		 * In order to prevent Tx hangs when the internal Tx
-		 * switch is enabled we must set the high water mark
-		 * to the maximum FCRTH value.  This allows the Tx
-		 * switch to function even under heavy Rx workloads.
-		 */
-		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
-
-	/* Configure pause time (2 TCs per register) */
-	reg = hw->fc.pause_time * 0x00010001;
-	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
-		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-	/* Configure flow control refresh threshold value */
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-out:
-	return ret_val;
-}
-
-static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
-
-	if(hw->mac.type != ixgbe_mac_82598EB) {
-		ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
-	}
-	return ret_val;
-}
-
-static int
-ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
-{
-	int err;
-	uint32_t rx_buf_size;
-	uint32_t max_high_water;
-	uint8_t tc_num;
-	uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
-	struct ixgbe_hw *hw =
-                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_dcb_config *dcb_config =
-                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-
-	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
-		ixgbe_fc_none,
-		ixgbe_fc_rx_pause,
-		ixgbe_fc_tx_pause,
-		ixgbe_fc_full
-	};
-
-	PMD_INIT_FUNC_TRACE();
-
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	tc_num = map[pfc_conf->priority];
-	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
-	/*
-	 * At least reserve one Ethernet frame for watermark
-	 * high_water/low_water in kilo bytes for ixgbe
-	 */
-	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
-	if ((pfc_conf->fc.high_water > max_high_water) ||
-	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
-		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-		return (-EINVAL);
-	}
-
-	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
-	hw->fc.pause_time = pfc_conf->fc.pause_time;
-	hw->fc.send_xon = pfc_conf->fc.send_xon;
-	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
-	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
-
-	err = ixgbe_dcb_pfc_enable(dev,tc_num);
-
-	/* Not negotiated is not an error case */
-	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
-		return 0;
-
-	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
-	return -EIO;
-}
-
-static int
-ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-			  struct rte_eth_rss_reta_entry64 *reta_conf,
-			  uint16_t reta_size)
-{
-	uint8_t i, j, mask;
-	uint32_t reta, r;
-	uint16_t idx, shift;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
-		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
-		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
-						IXGBE_4_BIT_MASK);
-		if (!mask)
-			continue;
-		if (mask == IXGBE_4_BIT_MASK)
-			r = 0;
-		else
-			r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
-		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
-			if (mask & (0x1 << j))
-				reta |= reta_conf[idx].reta[shift + j] <<
-							(CHAR_BIT * j);
-			else
-				reta |= r & (IXGBE_8_BIT_MASK <<
-						(CHAR_BIT * j));
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-			 struct rte_eth_rss_reta_entry64 *reta_conf,
-			 uint16_t reta_size)
-{
-	uint8_t i, j, mask;
-	uint32_t reta;
-	uint16_t idx, shift;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-	if (reta_size != ETH_RSS_RETA_SIZE_128) {
-		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-				"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
-		idx = i / RTE_RETA_GROUP_SIZE;
-		shift = i % RTE_RETA_GROUP_SIZE;
-		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
-						IXGBE_4_BIT_MASK);
-		if (!mask)
-			continue;
-
-		reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
-		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
-			if (mask & (0x1 << j))
-				reta_conf[idx].reta[shift + j] =
-					((reta >> (CHAR_BIT * j)) &
-						IXGBE_8_BIT_MASK);
-		}
-	}
-
-	return 0;
-}
-
-static void
-ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-				uint32_t index, uint32_t pool)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t enable_addr = 1;
-
-	ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
-}
-
-static void
-ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	ixgbe_clear_rar(hw, index);
-}
-
-static int
-ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
-{
-	uint32_t hlreg0;
-	uint32_t maxfrs;
-	struct ixgbe_hw *hw;
-	struct rte_eth_dev_info dev_info;
-	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
-	ixgbe_dev_info_get(dev, &dev_info);
-
-	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
-		return -EINVAL;
-
-	/* refuse mtu that requires the support of scattered packets when this
-	 * feature has not been enabled before. */
-	if (!dev->data->scattered_rx &&
-	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
-	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
-		return -EINVAL;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-
-	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
-		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
-	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
-	maxfrs &= 0x0000FFFF;
-	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
-	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
-
-	return 0;
-}
-
-/*
- * Virtual Function operations
- */
-static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	/* Clear interrupt mask to stop interrupts from being generated */
-	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
-
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-static int
-ixgbevf_dev_configure(struct rte_eth_dev *dev)
-{
-	struct rte_eth_conf* conf = &dev->data->dev_conf;
-
-	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
-		     dev->data->port_id);
-
-	/*
-	 * VF has no ability to enable/disable HW CRC
-	 * Keep the persistent behavior the same as Host PF
-	 */
-#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
-	}
-#else
-	if (conf->rxmode.hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
-	}
-#endif
-
-	return 0;
-}
-
-static int
-ixgbevf_dev_start(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int err, mask = 0;
-
-	PMD_INIT_FUNC_TRACE();
-
-	hw->mac.ops.reset_hw(hw);
-	hw->mac.get_link_status = true;
-
-	/* negotiate mailbox API version to use with the PF. */
-	ixgbevf_negotiate_api(hw);
-
-	ixgbevf_dev_tx_init(dev);
-
-	/* This can fail when allocating mbufs for descriptor rings */
-	err = ixgbevf_dev_rx_init(dev);
-	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
-		ixgbe_dev_clear_queues(dev);
-		return err;
-	}
-
-	/* Set vfta */
-	ixgbevf_set_vfta_all(dev,1);
-
-	/* Set HW strip */
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-		ETH_VLAN_EXTEND_MASK;
-	ixgbevf_vlan_offload_set(dev, mask);
-
-	ixgbevf_dev_rxtx_start(dev);
-
-	return 0;
-}
-
-static void
-ixgbevf_dev_stop(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-
-	hw->adapter_stopped = TRUE;
-	ixgbe_stop_adapter(hw);
-
-	/*
-	 * Clear what we set, but still keep shadow_vfta so it can be
-	 * restored after the device starts
-	 */
-	ixgbevf_set_vfta_all(dev,0);
-
-	/* Clear stored conf */
-	dev->data->scattered_rx = 0;
-
-	ixgbe_dev_clear_queues(dev);
-}
-
-static void
-ixgbevf_dev_close(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-
-	ixgbe_reset_hw(hw);
-
-	ixgbevf_dev_stop(dev);
-
-	/* reprogram the RAR[0] in case user changed it. */
-	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-}
-
-static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-	int i = 0, j = 0, vfta = 0, mask = 1;
-
-	for (i = 0; i < IXGBE_VFTA_SIZE; i++){
-		vfta = shadow_vfta->vfta[i];
-		if(vfta){
-			mask = 1;
-			for (j = 0; j < 32; j++){
-				if(vfta & mask)
-					ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
-				mask<<=1;
-			}
-		}
-	}
-
-}
-
-static int
-ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
-		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
-	uint32_t vid_idx = 0;
-	uint32_t vid_bit = 0;
-	int ret = 0;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
-	ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
-	if(ret){
-		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
-		return ret;
-	}
-	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
-	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
-
-	/* Save what we set and restore it after device reset */
-	if (on)
-		shadow_vfta->vfta[vid_idx] |= vid_bit;
-	else
-		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
-
-	return 0;
-}
-
-static void
-ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ctrl;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if(queue >= hw->mac.max_rx_queues)
-		return;
-
-	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-	if(on)
-		ctrl |= IXGBE_RXDCTL_VME;
-	else
-		ctrl &= ~IXGBE_RXDCTL_VME;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-
-	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
-}
-
-static void
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint16_t i;
-	int on = 0;
-
-	/* The VF function only supports the hw strip feature; others are not supported */
-	if(mask & ETH_VLAN_STRIP_MASK){
-		on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
-		for(i=0; i < hw->mac.max_rx_queues; i++)
-			ixgbevf_vlan_strip_queue_set(dev,i,on);
-	}
-}
-
-static int
-ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
-{
-	uint32_t reg_val;
-
-	/* we only need to do this if VMDq is enabled */
-	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-		PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
-		return (-1);
-	}
-
-	return 0;
-}
-
-static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
-{
-	uint32_t vector = 0;
-	switch (hw->mac.mc_filter_type) {
-	case 0:   /* use bits [47:36] of the address */
-		vector = ((uc_addr->addr_bytes[4] >> 4) |
-			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
-		break;
-	case 1:   /* use bits [46:35] of the address */
-		vector = ((uc_addr->addr_bytes[4] >> 3) |
-			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
-		break;
-	case 2:   /* use bits [45:34] of the address */
-		vector = ((uc_addr->addr_bytes[4] >> 2) |
-			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
-		break;
-	case 3:   /* use bits [43:32] of the address */
-		vector = ((uc_addr->addr_bytes[4]) |
-			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
-		break;
-	default:  /* Invalid mc_filter_type */
-		break;
-	}
-
-	/* vector can only be 12-bits or boundary will be exceeded */
-	vector &= 0xFFF;
-	return vector;
-}
-
-static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
-			       uint8_t on)
-{
-	uint32_t vector;
-	uint32_t uta_idx;
-	uint32_t reg_val;
-	uint32_t uta_shift;
-	uint32_t rc;
-	const uint32_t ixgbe_uta_idx_mask = 0x7F;
-	const uint32_t ixgbe_uta_bit_shift = 5;
-	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
-	const uint32_t bit1 = 0x1;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_uta_info *uta_info =
-		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
-
-	/* The UTA table only exists on 82599 hardware and newer */
-	if (hw->mac.type < ixgbe_mac_82599EB)
-		return (-ENOTSUP);
-
-	vector = ixgbe_uta_vector(hw,mac_addr);
-	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
-	uta_shift = vector & ixgbe_uta_bit_mask;
-
-	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
-	if(rc == on)
-		return 0;
-
-	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
-	if (on) {
-		uta_info->uta_in_use++;
-		reg_val |= (bit1 << uta_shift);
-		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
-	} else {
-		uta_info->uta_in_use--;
-		reg_val &= ~(bit1 << uta_shift);
-		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
-
-	if (uta_info->uta_in_use > 0)
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
-				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
-
-	return 0;
-}
-
-static int
-ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
-{
-	int i;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_uta_info *uta_info =
-		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
-
-	/* The UTA table only exists on 82599 hardware and newer */
-	if (hw->mac.type < ixgbe_mac_82599EB)
-		return (-ENOTSUP);
-
-	if(on) {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
-			uta_info->uta_shadow[i] = ~0;
-			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
-		}
-	} else {
-		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
-			uta_info->uta_shadow[i] = 0;
-			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-		}
-	}
-	return 0;
-
-}
-
-uint32_t
-ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
-{
-	uint32_t new_val = orig_val;
-
-	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
-		new_val |= IXGBE_VMOLR_AUPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
-		new_val |= IXGBE_VMOLR_ROMPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
-		new_val |= IXGBE_VMOLR_ROPE;
-	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
-		new_val |= IXGBE_VMOLR_BAM;
-	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
-		new_val |= IXGBE_VMOLR_MPE;
-
-	return new_val;
-}
-
-static int
-ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
-			       uint16_t rx_mask, uint8_t on)
-{
-	int val = 0;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-
-	if (hw->mac.type == ixgbe_mac_82598EB) {
-		PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
-			     " on 82599 hardware and newer");
-		return (-ENOTSUP);
-	}
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-
-	val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
-	if (on)
-		vmolr |= val;
-	else
-		vmolr &= ~val;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-
-	return 0;
-}
-
-static int
-ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
-	uint32_t reg,addr;
-	uint32_t val;
-	const uint8_t bit1 = 0x1;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-
-	addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
-	reg = IXGBE_READ_REG(hw, addr);
-	val = bit1 << pool;
-
-	if (on)
-		reg |= val;
-	else
-		reg &= ~val;
-
-	IXGBE_WRITE_REG(hw, addr,reg);
-
-	return 0;
-}
-
-static int
-ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
-	uint32_t reg,addr;
-	uint32_t val;
-	const uint8_t bit1 = 0x1;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-
-	addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
-	reg = IXGBE_READ_REG(hw, addr);
-	val = bit1 << pool;
-
-	if (on)
-		reg |= val;
-	else
-		reg &= ~val;
-
-	IXGBE_WRITE_REG(hw, addr,reg);
-
-	return 0;
-}
-
-static int
-ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-			uint64_t pool_mask, uint8_t vlan_on)
-{
-	int ret = 0;
-	uint16_t pool_idx;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-	for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-		if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
-			ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-static int
-ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-			struct rte_eth_vmdq_mirror_conf *mirror_conf,
-			uint8_t rule_id, uint8_t on)
-{
-	uint32_t mr_ctl,vlvf;
-	uint32_t mp_lsb = 0;
-	uint32_t mv_msb = 0;
-	uint32_t mv_lsb = 0;
-	uint32_t mp_msb = 0;
-	uint8_t i = 0;
-	int reg_index = 0;
-	uint64_t vlan_mask = 0;
-
-	const uint8_t pool_mask_offset = 32;
-	const uint8_t vlan_mask_offset = 32;
-	const uint8_t dst_pool_offset = 8;
-	const uint8_t rule_mr_offset  = 4;
-	const uint8_t mirror_rule_mask= 0x0F;
-
-	struct ixgbe_mirror_info *mr_info =
-			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-
-	/* Check if vlan mask is valid */
-	if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
-		if (mirror_conf->vlan.vlan_mask == 0)
-			return (-EINVAL);
-	}
-
-	/* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
-	if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
-		for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
-			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
-				/* search vlan id related pool vlan filter index */
-				reg_index = ixgbe_find_vlvf_slot(hw,
-						mirror_conf->vlan.vlan_id[i]);
-				if(reg_index < 0)
-					return (-EINVAL);
-				vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
-				if ((vlvf & IXGBE_VLVF_VIEN) &&
-					((vlvf & IXGBE_VLVF_VLANID_MASK)
-						== mirror_conf->vlan.vlan_id[i]))
-					vlan_mask |= (1ULL << reg_index);
-				else
-					return (-EINVAL);
-			}
-		}
-
-		if (on) {
-			mv_lsb = vlan_mask & 0xFFFFFFFF;
-			mv_msb = vlan_mask >> vlan_mask_offset;
-
-			mr_info->mr_conf[rule_id].vlan.vlan_mask =
-						mirror_conf->vlan.vlan_mask;
-			for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-				if(mirror_conf->vlan.vlan_mask & (1ULL << i))
-					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
-						mirror_conf->vlan.vlan_id[i];
-			}
-		} else {
-			mv_lsb = 0;
-			mv_msb = 0;
-			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-			for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
-				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
-		}
-	}
-
-	/*
-	 * If pool mirroring is enabled, write the related pool mask register;
-	 * if it is disabled, clear the PFMRVM register.
-	 */
-	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
-		if (on) {
-			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
-			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-			mr_info->mr_conf[rule_id].pool_mask =
-					mirror_conf->pool_mask;
-
-		} else {
-			mp_lsb = 0;
-			mp_msb = 0;
-			mr_info->mr_conf[rule_id].pool_mask = 0;
-		}
-	}
-
-	/* read  mirror control register and recalculate it */
-	mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
-
-	if (on) {
-		mr_ctl |= mirror_conf->rule_type_mask;
-		mr_ctl &= mirror_rule_mask;
-		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
-	} else
-		mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
-
-	mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
-	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
-
-	/* write mirror control register */
-	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-	/* write pool mirror control register */
-	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
-				mp_msb);
-	}
-	/* write VLAN mirror control register */
-	if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
-		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
-				mv_msb);
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
-{
-	int mr_ctl = 0;
-	uint32_t lsb_val = 0;
-	uint32_t msb_val = 0;
-	const uint8_t rule_mr_offset = 4;
-
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_mirror_info *mr_info =
-		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-
-	if (ixgbe_vmdq_mode_check(hw) < 0)
-		return (-ENOTSUP);
-
-	memset(&mr_info->mr_conf[rule_id], 0,
-		sizeof(struct rte_eth_vmdq_mirror_conf));
-
-	/* clear PFVMCTL register */
-	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-	/* clear pool mask register */
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
-
-	/* clear vlan mask register */
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
-	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
-
-	return 0;
-}
-
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-	uint16_t queue_idx, uint16_t tx_rate)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t rf_dec, rf_int;
-	uint32_t bcnrc_val;
-	uint16_t link_speed = dev->data->dev_link.link_speed;
-
-	if (queue_idx >= hw->mac.max_tx_queues)
-		return -EINVAL;
-
-	if (tx_rate != 0) {
-		/* Calculate the rate factor values to set */
-		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
-		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
-		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
-
-		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-				IXGBE_RTTBCNRC_RF_INT_MASK_M);
-		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-	} else {
-		bcnrc_val = 0;
-	}
-
-	/*
-	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
-	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
-	 * set it to 0x4.
-	 */
-	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-				IXGBE_MAX_JUMBO_FRAME_SIZE))
-		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-			IXGBE_MMW_SIZE_JUMBO_FRAME);
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-			IXGBE_MMW_SIZE_DEFAULT);
-
-	/* Set RTTBCNRC of queue X */
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
-	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return 0;
-}
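The rate limiter encodes link_speed/tx_rate as an integer part plus a binary fraction shifted by IXGBE_RTTBCNRC_RF_INT_SHIFT. A small standalone check of that arithmetic, assuming the shift is 14 (an assumption mirroring the base driver headers):

/* Standalone check of the rate-factor split: factor = link_speed / tx_rate,
 * kept as an integer part plus a fraction scaled by 2^14. */
#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT 14	/* assumed value of IXGBE_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	uint32_t link_speed = 10000;	/* Mb/s, full 10G link */
	uint32_t tx_rate = 3000;	/* Mb/s cap requested for the queue */

	uint32_t rf_int = link_speed / tx_rate;		/* 3 */
	uint32_t rf_dec = link_speed % tx_rate;		/* 1000 */
	rf_dec = (rf_dec << RF_INT_SHIFT) / tx_rate;	/* ~ 0.333 * 2^14 */

	printf("rate factor = %u + %u/%u\n", rf_int, rf_dec, 1u << RF_INT_SHIFT);
	return 0;
}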
-
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-	uint16_t tx_rate, uint64_t q_msk)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-	uint32_t queue_stride =
-		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
-	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
-	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
-	uint16_t total_rate = 0;
-
-	if (queue_end >= hw->mac.max_tx_queues)
-		return -EINVAL;
-
-	if (vfinfo != NULL) {
-		for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
-			if (vf_idx == vf)
-				continue;
-			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
-				idx++)
-				total_rate += vfinfo[vf_idx].tx_rate[idx];
-		}
-	} else
-		return -EINVAL;
-
-	/* Store tx_rate for this vf. */
-	for (idx = 0; idx < nb_q_per_pool; idx++) {
-		if (((uint64_t)0x1 << idx) & q_msk) {
-			if (vfinfo[vf].tx_rate[idx] != tx_rate)
-				vfinfo[vf].tx_rate[idx] = tx_rate;
-			total_rate += tx_rate;
-		}
-	}
-
-	if (total_rate > dev->data->dev_link.link_speed) {
-		/*
-		 * Reset the stored TX rate of the VF if it would cause the
-		 * link speed to be exceeded.
-		 */
-		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
-		return -EINVAL;
-	}
-
-	/* Set RTTBCNRC of each queue/pool for vf X  */
-	for (; queue_idx <= queue_end; queue_idx++) {
-		if (0x1 & q_msk)
-			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
-		q_msk = q_msk >> 1;
-	}
-
-	return 0;
-}
-
-static void
-ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		     __attribute__((unused)) uint32_t index,
-		     __attribute__((unused)) uint32_t pool)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int diag;
-
-	/*
-	 * On an 82599 VF, adding the same MAC address again is not an idempotent
-	 * operation. Trap this case to avoid exhausting the [very limited]
-	 * set of PF resources used to store VF MAC addresses.
-	 */
-	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
-		return;
-	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
-	if (diag == 0)
-		return;
-	PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
-}
-
-static void
-ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
-	struct ether_addr *mac_addr;
-	uint32_t i;
-	int diag;
-
-	/*
-	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
-	 * not support the deletion of a given MAC address.
-	 * Instead, it requires deleting all MAC addresses, then adding back
-	 * all MAC addresses except the one to be deleted.
-	 */
-	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
-
-	/*
-	 * Add again all MAC addresses, with the exception of the deleted one
-	 * and of the permanent MAC address.
-	 */
-	for (i = 0, mac_addr = dev->data->mac_addrs;
-	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
-		/* Skip the deleted MAC address */
-		if (i == index)
-			continue;
-		/* Skip NULL MAC addresses */
-		if (is_zero_ether_addr(mac_addr))
-			continue;
-		/* Skip the permanent MAC address */
-		if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
-			continue;
-		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
-		if (diag != 0)
-			PMD_DRV_LOG(ERR,
-				    "Adding again MAC address "
-				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
-				    "diag=%d",
-				    mac_addr->addr_bytes[0],
-				    mac_addr->addr_bytes[1],
-				    mac_addr->addr_bytes[2],
-				    mac_addr->addr_bytes[3],
-				    mac_addr->addr_bytes[4],
-				    mac_addr->addr_bytes[5],
-				    diag);
-	}
-}
-
-#define MAC_TYPE_FILTER_SUP(type)    do {\
-	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
-		(type) != ixgbe_mac_X550)\
-		return -ENOTSUP;\
-} while (0)
-
-static int
-ixgbe_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
-			bool add)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t synqf;
-
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
-		return -EINVAL;
-
-	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
-
-	if (add) {
-		if (synqf & IXGBE_SYN_FILTER_ENABLE)
-			return -EINVAL;
-		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
-			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
-
-		if (filter->hig_pri)
-			synqf |= IXGBE_SYN_FILTER_SYNQFP;
-		else
-			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
-	} else {
-		if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
-			return -ENOENT;
-		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
-	IXGBE_WRITE_FLUSH(hw);
-	return 0;
-}
-
-static int
-ixgbe_syn_filter_get(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
-
-	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
-		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
-		filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
-		return 0;
-	}
-	return -ENOENT;
-}
-
-static int
-ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op,
-			void *arg)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	MAC_TYPE_FILTER_SUP(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_syn_filter_set(dev,
-				(struct rte_eth_syn_filter *)arg,
-				TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_syn_filter_set(dev,
-				(struct rte_eth_syn_filter *)arg,
-				FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = ixgbe_syn_filter_get(dev,
-				(struct rte_eth_syn_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-
-static inline enum ixgbe_5tuple_protocol
-convert_protocol_type(uint8_t protocol_value)
-{
-	if (protocol_value == IPPROTO_TCP)
-		return IXGBE_FILTER_PROTOCOL_TCP;
-	else if (protocol_value == IPPROTO_UDP)
-		return IXGBE_FILTER_PROTOCOL_UDP;
-	else if (protocol_value == IPPROTO_SCTP)
-		return IXGBE_FILTER_PROTOCOL_SCTP;
-	else
-		return IXGBE_FILTER_PROTOCOL_NONE;
-}
-
-/*
- * add a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * index: the index that the filter is allocated to.
- * filter: pointer to the filter that will be added.
- * rx_queue: the queue id that the filter is assigned to.
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
-			struct ixgbe_5tuple_filter *filter)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	int i, idx, shift;
-	uint32_t ftqf, sdpqf;
-	uint32_t l34timir = 0;
-	uint8_t mask = 0xff;
-
-	/*
-	 * look for an unused 5tuple filter index
-	 * and insert the filter into the list.
-	 */
-	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
-		idx = i / (sizeof(uint32_t) * NBBY);
-		shift = i % (sizeof(uint32_t) * NBBY);
-		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
-			filter_info->fivetuple_mask[idx] |= 1 << shift;
-			filter->index = i;
-			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
-					  filter,
-					  entries);
-			break;
-		}
-	}
-	if (i >= IXGBE_MAX_FTQF_FILTERS) {
-		PMD_DRV_LOG(ERR, "5tuple filters are full.");
-		return -ENOSYS;
-	}
-
-	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
-				IXGBE_SDPQF_DSTPORT_SHIFT);
-	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
-	ftqf = (uint32_t)(filter->filter_info.proto &
-		IXGBE_FTQF_PROTOCOL_MASK);
-	ftqf |= (uint32_t)((filter->filter_info.priority &
-		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
-	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
-		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
-	if (filter->filter_info.dst_ip_mask == 0)
-		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
-	if (filter->filter_info.src_port_mask == 0)
-		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
-	if (filter->filter_info.dst_port_mask == 0)
-		mask &= IXGBE_FTQF_DEST_PORT_MASK;
-	if (filter->filter_info.proto_mask == 0)
-		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
-	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
-	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
-	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
-	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
-	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
-	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
-
-	l34timir |= IXGBE_L34T_IMIR_RESERVE;
-	l34timir |= (uint32_t)(filter->queue <<
-				IXGBE_L34T_IMIR_QUEUE_SHIFT);
-	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
-	return 0;
-}
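The free-slot search above treats fivetuple_mask[] as a flat bitmap, one bit per filter, packed into 32-bit words. A standalone sketch of the same idx/shift bookkeeping (the names and the 128-entry size are illustrative):

/* Slot i lives in word i/32, bit i%32; claim the first clear bit. */
#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS	128
#define BITS_PER_WORD	32

static int
claim_free_slot(uint32_t *mask)
{
	int i;

	for (i = 0; i < MAX_FILTERS; i++) {
		int idx = i / BITS_PER_WORD;
		int shift = i % BITS_PER_WORD;

		if (!(mask[idx] & (1u << shift))) {
			mask[idx] |= 1u << shift;	/* mark as in use */
			return i;
		}
	}
	return -1;	/* table is full */
}

int main(void)
{
	uint32_t in_use[MAX_FILTERS / BITS_PER_WORD] = { 0x7 };	/* slots 0-2 taken */

	printf("first free slot = %d\n", claim_free_slot(in_use));	/* 3 */
	return 0;
}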
-
-/*
- * remove a 5tuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * filter: pointer to the filter to be removed.
- */
-static void
-ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
-			struct ixgbe_5tuple_filter *filter)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint16_t index = filter->index;
-
-	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
-				~(1 << (index % (sizeof(uint32_t) * NBBY)));
-	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
-	rte_free(filter);
-
-	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
-}
-
-static int
-ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
-{
-	struct ixgbe_hw *hw;
-	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
-		return -EINVAL;
-
-	/* refuse an MTU that requires scattered packet support when that
-	 * feature has not already been enabled. */
-	if (!dev->data->scattered_rx &&
-	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
-	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
-		return -EINVAL;
-
-	/*
-	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
-	 * request of the version 2.0 of the mailbox API.
-	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
-	 * of the mailbox API.
-	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
-	 * prior to 3.11.33 which contains the following change:
-	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
-	 */
-	ixgbevf_rlpml_set_vf(hw, max_frame);
-
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
-	return 0;
-}
-
-#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
-	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
-		return -ENOTSUP;\
-} while (0)
-
-static inline struct ixgbe_5tuple_filter *
-ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
-			struct ixgbe_5tuple_filter_info *key)
-{
-	struct ixgbe_5tuple_filter *it;
-
-	TAILQ_FOREACH(it, filter_list, entries) {
-		if (memcmp(key, &it->filter_info,
-			sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
-			return it;
-		}
-	}
-	return NULL;
-}
-
-/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
-static inline int
-ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
-			struct ixgbe_5tuple_filter_info *filter_info)
-{
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
-		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
-		filter->priority < IXGBE_5TUPLE_MIN_PRI)
-		return -EINVAL;
-
-	switch (filter->dst_ip_mask) {
-	case UINT32_MAX:
-		filter_info->dst_ip_mask = 0;
-		filter_info->dst_ip = filter->dst_ip;
-		break;
-	case 0:
-		filter_info->dst_ip_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
-		return -EINVAL;
-	}
-
-	switch (filter->src_ip_mask) {
-	case UINT32_MAX:
-		filter_info->src_ip_mask = 0;
-		filter_info->src_ip = filter->src_ip;
-		break;
-	case 0:
-		filter_info->src_ip_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
-		return -EINVAL;
-	}
-
-	switch (filter->dst_port_mask) {
-	case UINT16_MAX:
-		filter_info->dst_port_mask = 0;
-		filter_info->dst_port = filter->dst_port;
-		break;
-	case 0:
-		filter_info->dst_port_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
-		return -EINVAL;
-	}
-
-	switch (filter->src_port_mask) {
-	case UINT16_MAX:
-		filter_info->src_port_mask = 0;
-		filter_info->src_port = filter->src_port;
-		break;
-	case 0:
-		filter_info->src_port_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid src_port mask.");
-		return -EINVAL;
-	}
-
-	switch (filter->proto_mask) {
-	case UINT8_MAX:
-		filter_info->proto_mask = 0;
-		filter_info->proto =
-			convert_protocol_type(filter->proto);
-		break;
-	case 0:
-		filter_info->proto_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid protocol mask.");
-		return -EINVAL;
-	}
-
-	filter_info->priority = (uint8_t)filter->priority;
-	return 0;
-}
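Note the inversion of the mask sense here: a generic-filter mask of all ones means "compare this field", while the hardware-side *_mask bit is set to 1 to skip the comparison. A small plain-C sketch of that translation for one 32-bit field (names are illustrative):

/* An all-ones user mask means "compare the field" (skip bit 0); a zero
 * user mask means "ignore it" (skip bit 1); partial masks are rejected,
 * as in the function above. */
#include <stdint.h>
#include <stdio.h>

static int
translate_mask(uint32_t user_mask, uint8_t *hw_skip_bit)
{
	if (user_mask == UINT32_MAX) {
		*hw_skip_bit = 0;	/* compare this field */
		return 0;
	}
	if (user_mask == 0) {
		*hw_skip_bit = 1;	/* do not compare this field */
		return 0;
	}
	return -1;			/* partial masks are not supported */
}

int main(void)
{
	uint8_t skip;

	if (translate_mask(UINT32_MAX, &skip) == 0)
		printf("exact match -> skip bit %u\n", skip);
	if (translate_mask(0, &skip) == 0)
		printf("wildcard    -> skip bit %u\n", skip);
	return 0;
}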
-
-/*
- * add or delete a ntuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
- * add: if true, add filter, if false, remove filter
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter,
-			bool add)
-{
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter_info filter_5tuple;
-	struct ixgbe_5tuple_filter *filter;
-	int ret;
-
-	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
-		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
-		return -EINVAL;
-	}
-
-	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
-	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
-	if (ret < 0)
-		return ret;
-
-	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
-					 &filter_5tuple);
-	if (filter != NULL && add) {
-		PMD_DRV_LOG(ERR, "filter exists.");
-		return -EEXIST;
-	}
-	if (filter == NULL && !add) {
-		PMD_DRV_LOG(ERR, "filter doesn't exist.");
-		return -ENOENT;
-	}
-
-	if (add) {
-		filter = rte_zmalloc("ixgbe_5tuple_filter",
-				sizeof(struct ixgbe_5tuple_filter), 0);
-		if (filter == NULL)
-			return -ENOMEM;
-		(void)rte_memcpy(&filter->filter_info,
-				 &filter_5tuple,
-				 sizeof(struct ixgbe_5tuple_filter_info));
-		filter->queue = ntuple_filter->queue;
-		ret = ixgbe_add_5tuple_filter(dev, filter);
-		if (ret < 0) {
-			rte_free(filter);
-			return ret;
-		}
-	} else
-		ixgbe_remove_5tuple_filter(dev, filter);
-
-	return 0;
-}
-
-/*
- * get a ntuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
-{
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter_info filter_5tuple;
-	struct ixgbe_5tuple_filter *filter;
-	int ret;
-
-	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
-		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
-		return -EINVAL;
-	}
-
-	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
-	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
-	if (ret < 0)
-		return ret;
-
-	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
-					 &filter_5tuple);
-	if (filter == NULL) {
-		PMD_DRV_LOG(ERR, "filter doesn't exist.");
-		return -ENOENT;
-	}
-	ntuple_filter->queue = filter->queue;
-	return 0;
-}
-
-/*
- * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op: operation to be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
-				enum rte_filter_op filter_op,
-				void *arg)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = ixgbe_get_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
-
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
-			uint16_t ethertype)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (filter_info->ethertype_filters[i] == ethertype &&
-		    (filter_info->ethertype_mask & (1 << i)))
-			return i;
-	}
-	return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
-			uint16_t ethertype)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (!(filter_info->ethertype_mask & (1 << i))) {
-			filter_info->ethertype_mask |= 1 << i;
-			filter_info->ethertype_filters[i] = ethertype;
-			return i;
-		}
-	}
-	return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
-			uint8_t idx)
-{
-	if (idx >= IXGBE_MAX_ETQF_FILTERS)
-		return -1;
-	filter_info->ethertype_mask &= ~(1 << idx);
-	filter_info->ethertype_filters[idx] = 0;
-	return idx;
-}
-
-static int
-ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
-			bool add)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint32_t etqf = 0;
-	uint32_t etqs = 0;
-	int ret;
-
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
-		return -EINVAL;
-
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-		filter->ether_type == ETHER_TYPE_IPv6) {
-		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
-			" ethertype filter.", filter->ether_type);
-		return -EINVAL;
-	}
-
-	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
-		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
-		return -EINVAL;
-	}
-	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
-		PMD_DRV_LOG(ERR, "drop option is unsupported.");
-		return -EINVAL;
-	}
-
-	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
-	if (ret >= 0 && add) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
-			    filter->ether_type);
-		return -EEXIST;
-	}
-	if (ret < 0 && !add) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
-			    filter->ether_type);
-		return -ENOENT;
-	}
-
-	if (add) {
-		ret = ixgbe_ethertype_filter_insert(filter_info,
-			filter->ether_type);
-		if (ret < 0) {
-			PMD_DRV_LOG(ERR, "ethertype filters are full.");
-			return -ENOSYS;
-		}
-		etqf = IXGBE_ETQF_FILTER_EN;
-		etqf |= (uint32_t)filter->ether_type;
-		etqs |= (uint32_t)((filter->queue <<
-				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
-				    IXGBE_ETQS_RX_QUEUE);
-		etqs |= IXGBE_ETQS_QUEUE_EN;
-	} else {
-		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
-		if (ret < 0)
-			return -ENOSYS;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
-	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return 0;
-}
-
-static int
-ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint32_t etqf, etqs;
-	int ret;
-
-	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
-			    filter->ether_type);
-		return -ENOENT;
-	}
-
-	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
-	if (etqf & IXGBE_ETQF_FILTER_EN) {
-		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
-		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
-		filter->flags = 0;
-		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
-			       IXGBE_ETQS_RX_QUEUE_SHIFT;
-		return 0;
-	}
-	return -ENOENT;
-}
-
-/*
- * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op: operation to be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- */
-static int
-ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
-				enum rte_filter_op filter_op,
-				void *arg)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	MAC_TYPE_FILTER_SUP(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = ixgbe_get_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
-
-static int
-ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg)
-{
-	int ret = -EINVAL;
-
-	switch (filter_type) {
-	case RTE_ETH_FILTER_NTUPLE:
-		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_ETHERTYPE:
-		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_SYN:
-		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_FDIR:
-		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
-		break;
-	default:
-		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-							filter_type);
-		break;
-	}
-
-	return ret;
-}
-
-static struct rte_driver rte_ixgbe_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbe_pmd_init,
-};
-
-static struct rte_driver rte_ixgbevf_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbevf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
deleted file mode 100644
index 419ea5d..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ /dev/null
@@ -1,400 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_ETHDEV_H_
-#define _IXGBE_ETHDEV_H_
-#include "ixgbe/ixgbe_dcb.h"
-#include "ixgbe/ixgbe_dcb_82599.h"
-#include "ixgbe/ixgbe_dcb_82598.h"
-#include "ixgbe_bypass.h"
-
-/* need to update link, bit flag */
-#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
-#define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
-
-/*
- * Defines that were not part of ixgbe_type.h as they are not used by the
- * FreeBSD driver.
- */
-#define IXGBE_ADVTXD_MAC_1588       0x00080000 /* IEEE1588 Timestamp packet */
-#define IXGBE_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
-#define IXGBE_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE, resvd  */
-#define IXGBE_RXDADV_ERR_CKSUM_BIT  30
-#define IXGBE_RXDADV_ERR_CKSUM_MSK  3
-#define IXGBE_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
-#define IXGBE_NB_STAT_MAPPING_REGS  32
-#define IXGBE_EXTENDED_VLAN	  (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
-#define IXGBE_VFTA_SIZE 128
-#define IXGBE_VLAN_TAG_SIZE 4
-#define IXGBE_MAX_RX_QUEUE_NUM	128
-#ifndef NBBY
-#define NBBY	8	/* number of bits in a byte */
-#endif
-#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
-
-/* EITR interval is in 2048 ns units for 1G and 10G links */
-#define IXGBE_EITR_INTERVAL_UNIT_NS	2048
-#define IXGBE_EITR_ITR_INT_SHIFT       3
-#define IXGBE_EITR_INTERVAL_US(us) \
-	(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
-		IXGBE_EITR_ITR_INT_MASK)
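A quick worked example of the conversion above: the interval field counts 2048 ns units and sits above the 3-bit shift, so a 10 us interval becomes (10 * 1000 / 2048) << 3 = 0x20 before masking. A standalone sketch with the constants copied from the macro:

/* Convert a microsecond interval into the EITR interval field. */
#include <stdint.h>
#include <stdio.h>

#define EITR_UNIT_NS	2048
#define EITR_INT_SHIFT	3

int main(void)
{
	uint32_t us = 10;
	uint32_t reg = (us * 1000 / EITR_UNIT_NS) << EITR_INT_SHIFT;

	printf("EITR interval field for %u us = 0x%x\n", us, reg);	/* 0x20 */
	return 0;
}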
-
-
-/* Loopback operation modes */
-/* 82599 specific loopback operation types */
-#define IXGBE_LPBK_82599_NONE   0x0 /* Default value. Loopback is disabled. */
-#define IXGBE_LPBK_82599_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */
-
-#define IXGBE_MAX_JUMBO_FRAME_SIZE      0x2600 /* Maximum Jumbo frame size. */
-
-#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
-#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
-	(IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-
-#define IXGBE_MAX_QUEUE_NUM_PER_VF  8
-
-#define IXGBE_SYN_FILTER_ENABLE         0x00000001 /* syn filter enable field */
-#define IXGBE_SYN_FILTER_QUEUE          0x000000FE /* syn filter queue field */
-#define IXGBE_SYN_FILTER_QUEUE_SHIFT    1          /* syn filter queue field shift */
-#define IXGBE_SYN_FILTER_SYNQFP         0x80000000 /* syn filter SYNQFP */
-
-#define IXGBE_ETQF_UP                   0x00070000 /* ethertype filter priority field */
-#define IXGBE_ETQF_SHIFT                16
-#define IXGBE_ETQF_UP_EN                0x00080000
-#define IXGBE_ETQF_ETHERTYPE            0x0000FFFF /* ethertype filter ethertype field */
-#define IXGBE_ETQF_MAX_PRI              7
-
-#define IXGBE_SDPQF_DSTPORT             0xFFFF0000 /* dst port field */
-#define IXGBE_SDPQF_DSTPORT_SHIFT       16         /* dst port field shift */
-#define IXGBE_SDPQF_SRCPORT             0x0000FFFF /* src port field */
-
-#define IXGBE_L34T_IMIR_SIZE_BP         0x00001000
-#define IXGBE_L34T_IMIR_RESERVE         0x00080000 /* bits 13 to 19 must be set to 1000000b. */
-#define IXGBE_L34T_IMIR_LLI             0x00100000
-#define IXGBE_L34T_IMIR_QUEUE           0x0FE00000
-#define IXGBE_L34T_IMIR_QUEUE_SHIFT     21
-#define IXGBE_5TUPLE_MAX_PRI            7
-#define IXGBE_5TUPLE_MIN_PRI            1
-
-#define IXGBE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_IPV6_EX | \
-	ETH_RSS_IPV6_TCP_EX | \
-	ETH_RSS_IPV6_UDP_EX)
-
-/*
- * Information about the fdir mode.
- */
-
-struct ixgbe_hw_fdir_mask {
-	uint16_t vlan_tci_mask;
-	uint32_t src_ipv4_mask;
-	uint32_t dst_ipv4_mask;
-	uint16_t src_ipv6_mask;
-	uint16_t dst_ipv6_mask;
-	uint16_t src_port_mask;
-	uint16_t dst_port_mask;
-	uint16_t flex_bytes_mask;
-};
-
-struct ixgbe_hw_fdir_info {
-	struct ixgbe_hw_fdir_mask mask;
-	uint8_t     flex_bytes_offset;
-	uint16_t    collision;
-	uint16_t    free;
-	uint16_t    maxhash;
-	uint8_t     maxlen;
-	uint64_t    add;
-	uint64_t    remove;
-	uint64_t    f_add;
-	uint64_t    f_remove;
-};
-
-/* structure for interrupt relative data */
-struct ixgbe_interrupt {
-	uint32_t flags;
-	uint32_t mask;
-};
-
-struct ixgbe_stat_mapping_registers {
-	uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
-	uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
-};
-
-struct ixgbe_vfta {
-	uint32_t vfta[IXGBE_VFTA_SIZE];
-};
-
-struct ixgbe_hwstrip {
-	uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
-};
-
-/*
- * VF data used by the PF host only
- */
-#define IXGBE_MAX_VF_MC_ENTRIES		30
-#define IXGBE_MAX_MR_RULE_ENTRIES	4 /* number of mirroring rules supported */
-#define IXGBE_MAX_UTA                   128
-
-struct ixgbe_uta_info {
-	uint8_t  uc_filter_type;
-	uint16_t uta_in_use;
-	uint32_t uta_shadow[IXGBE_MAX_UTA];
-};
-
-struct ixgbe_mirror_info {
-	struct rte_eth_vmdq_mirror_conf mr_conf[ETH_VMDQ_NUM_MIRROR_RULE];
-	/**< store PF mirror rules configuration */
-};
-
-struct ixgbe_vf_info {
-	uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
-	uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
-	uint16_t num_vf_mc_hashes;
-	uint16_t default_vf_vlan_id;
-	uint16_t vlans_enabled;
-	bool clear_to_send;
-	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
-	uint16_t vlan_count;
-	uint8_t spoofchk_enabled;
-	uint8_t api_version;
-};
-
-/*
- *  Possible l4type of 5tuple filters.
- */
-enum ixgbe_5tuple_protocol {
-	IXGBE_FILTER_PROTOCOL_TCP = 0,
-	IXGBE_FILTER_PROTOCOL_UDP,
-	IXGBE_FILTER_PROTOCOL_SCTP,
-	IXGBE_FILTER_PROTOCOL_NONE,
-};
-
-TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);
-
-struct ixgbe_5tuple_filter_info {
-	uint32_t dst_ip;
-	uint32_t src_ip;
-	uint16_t dst_port;
-	uint16_t src_port;
-	enum ixgbe_5tuple_protocol proto;        /* l4 protocol. */
-	uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
-				      used when more than one filter matches. */
-	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
-		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
-		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
-		src_port_mask:1, /* if mask is 1b, do not compare src port. */
-		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
-};
-
-/* 5tuple filter structure */
-struct ixgbe_5tuple_filter {
-	TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
-	uint16_t index;       /* the index of 5tuple filter */
-	struct ixgbe_5tuple_filter_info filter_info;
-	uint16_t queue;       /* rx queue assigned to */
-};
-
-#define IXGBE_5TUPLE_ARRAY_SIZE \
-	(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
-	 (sizeof(uint32_t) * NBBY))
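The array is sized by rounding the filter count up to whole 32-bit words. A quick check of the arithmetic, assuming IXGBE_MAX_FTQF_FILTERS is 128 (assumption):

/* Round the filter count up to whole 32-bit bitmap words. */
#include <stdio.h>

int main(void)
{
	unsigned int nb_filters = 128;		/* assumed value */
	unsigned int bits_per_word = 32;	/* sizeof(uint32_t) * NBBY */
	unsigned int words = (nb_filters + bits_per_word - 1) / bits_per_word;

	printf("bitmap words needed = %u\n", words);	/* 4 */
	return 0;
}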
-
-/*
- * Structure to store filters' info.
- */
-struct ixgbe_filter_info {
-	uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
-	/* store used ethertype filters*/
-	uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
-	/* Bit mask for every used 5tuple filter */
-	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
-	struct ixgbe_5tuple_filter_list fivetuple_list;
-};
-
-/*
- * Structure to store private data for each driver instance (for each port).
- */
-struct ixgbe_adapter {
-	struct ixgbe_hw             hw;
-	struct ixgbe_hw_stats       stats;
-	struct ixgbe_hw_fdir_info   fdir;
-	struct ixgbe_interrupt      intr;
-	struct ixgbe_stat_mapping_registers stat_mappings;
-	struct ixgbe_vfta           shadow_vfta;
-	struct ixgbe_hwstrip		hwstrip;
-	struct ixgbe_dcb_config     dcb_config;
-	struct ixgbe_mirror_info    mr_data;
-	struct ixgbe_vf_info        *vfdata;
-	struct ixgbe_uta_info       uta_info;
-#ifdef RTE_NIC_BYPASS
-	struct ixgbe_bypass_info    bps;
-#endif /* RTE_NIC_BYPASS */
-	struct ixgbe_filter_info    filter;
-
-	bool rx_bulk_alloc_allowed;
-	bool rx_vec_allowed;
-};
-
-#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
-	(&((struct ixgbe_adapter *)adapter)->hw)
-
-#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->stats)
-
-#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->intr)
-
-#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->fdir)
-
-#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->stat_mappings)
-
-#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->shadow_vfta)
-
-#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->hwstrip)
-
-#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->dcb_config)
-
-#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->vfdata)
-
-#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->mr_data)
-
-#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->uta_info)
-
-#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
-	(&((struct ixgbe_adapter *)adapter)->filter)
-
-/*
- * RX/TX function prototypes
- */
-void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
-
-void ixgbe_dev_rx_queue_release(void *rxq);
-
-void ixgbe_dev_tx_queue_release(void *txq);
-
-int  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
-		uint16_t nb_rx_desc, unsigned int socket_id,
-		const struct rte_eth_rxconf *rx_conf,
-		struct rte_mempool *mb_pool);
-
-int  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
-		uint16_t nb_tx_desc, unsigned int socket_id,
-		const struct rte_eth_txconf *tx_conf);
-
-uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
-		uint16_t rx_queue_id);
-
-int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
-int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
-
-void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
-
-int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
-
-int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-
-int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-
-int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
-
-int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
-
-int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
-
-void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
-
-void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
-
-uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-
-uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
-		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
-		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-
-uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts);
-
-uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts);
-
-int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
-			      struct rte_eth_rss_conf *rss_conf);
-
-int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
-				struct rte_eth_rss_conf *rss_conf);
-
-/*
- * Flow director function prototypes
- */
-int ixgbe_fdir_configure(struct rte_eth_dev *dev);
-
-void ixgbe_configure_dcb(struct rte_eth_dev *dev);
-
-/*
- * misc function prototypes
- */
-void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
-
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
-
-void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
-
-int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
-
-uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
-
-int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op, void *arg);
-#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
deleted file mode 100644
index afc53cb..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_fdir.c
+++ /dev/null
@@ -1,1144 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-
-#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe_ethdev.h"
-
-/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
-#define FDIRCTRL_PBALLOC_MASK           0x03
-
-/* For calculating memory required for FDIR filters */
-#define PBALLOC_SIZE_SHIFT              15
-
-/* Number of bits used to mask bucket hash for different pballoc sizes */
-#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
-#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
-#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
-#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
-#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
-#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
-#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
-#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
-#define IXGBE_MAX_FLX_SOURCE_OFF        62
-#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
-#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
-
-#define IXGBE_FDIR_FLOW_TYPES ( \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
-	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
-
-#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
-	uint8_t ipv6_addr[16]; \
-	uint8_t i; \
-	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
-	(ipv6m) = 0; \
-	for (i = 0; i < sizeof(ipv6_addr); i++) { \
-		if (ipv6_addr[i] == UINT8_MAX) \
-			(ipv6m) |= 1 << i; \
-		else if (ipv6_addr[i] != 0) { \
-			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
-			return -EINVAL; \
-		} \
-	} \
-} while (0)
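The macro compresses a 16-byte IPv6 mask, where each byte must be 0x00 or 0xFF, into one bit per byte. A standalone function-style sketch of the same logic (the helper name is illustrative):

/* Each fully-masked byte sets the corresponding bit; any partially
 * masked byte is rejected, as in the macro above. */
#include <stdint.h>
#include <stdio.h>

static int
ipv6_addr_to_mask(const uint8_t addr[16], uint16_t *out)
{
	uint16_t m = 0;
	int i;

	for (i = 0; i < 16; i++) {
		if (addr[i] == 0xFF)
			m |= (uint16_t)(1 << i);
		else if (addr[i] != 0)
			return -1;	/* partial byte masks are invalid */
	}
	*out = m;
	return 0;
}

int main(void)
{
	/* a /64 prefix mask: first eight bytes fully masked */
	const uint8_t prefix64[16] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint16_t m;

	if (ipv6_addr_to_mask(prefix64, &m) == 0)
		printf("mask bitmap = 0x%04x\n", m);	/* 0x00FF */
	return 0;
}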
-
-#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
-	uint8_t ipv6_addr[16]; \
-	uint8_t i; \
-	for (i = 0; i < sizeof(ipv6_addr); i++) { \
-		if ((ipv6m) & (1 << i)) \
-			ipv6_addr[i] = UINT8_MAX; \
-		else \
-			ipv6_addr[i] = 0; \
-	} \
-	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
-} while (0)
-
-static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_masks *input_mask);
-static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_flex_conf *conf);
-static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
-static int ixgbe_fdir_filter_to_atr_input(
-		const struct rte_eth_fdir_filter *fdir_filter,
-		union ixgbe_atr_input *input);
-static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
-				 uint32_t key);
-static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
-static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc);
-static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-			union ixgbe_atr_input *input, uint8_t queue,
-			uint32_t fdircmd, uint32_t fdirhash);
-static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
-		uint32_t fdirhash);
-static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
-			      const struct rte_eth_fdir_filter *fdir_filter,
-			      bool del,
-			      bool update);
-static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
-static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
-			struct rte_eth_fdir_info *fdir_info);
-static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
-			struct rte_eth_fdir_stats *fdir_stats);
-
-/**
- * This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c.
- * It adds extra configuration of fdirctrl that is common for all filter types.
- *
- *  Initialize Flow Director control registers
- *  @hw: pointer to hardware structure
- *  @fdirctrl: value to write to flow director control register
- **/
-static int
-fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
-{
-	int i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/*
-	 * Continue setup of fdirctrl register bits:
-	 *  Set the maximum length per hash bucket to 0xA filters
-	 *  Send interrupt when 64 filters are left
-	 */
-	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
-		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
-	/*
-	 * Poll init-done after we write the register.  Estimated times:
-	 *      10G: PBALLOC = 11b, timing is 60us
-	 *       1G: PBALLOC = 11b, timing is 600us
-	 *     100M: PBALLOC = 11b, timing is 6ms
-	 *
-	 *     Multiply these timings by 4 if under full Rx load
-	 *
-	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
-	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
-	 * this might not finish in our poll time, but we can live with that
-	 * for now.
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-	IXGBE_WRITE_FLUSH(hw);
-	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
-		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
-		                   IXGBE_FDIRCTRL_INIT_DONE)
-			break;
-		msec_delay(1);
-	}
-
-	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
-		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
-			"during enabling!");
-		return -ETIMEDOUT;
-	}
-	return 0;
-}
-
-/*
- * Set appropriate bits in fdirctrl for: variable reporting levels, moving
- * flexbytes matching field, and drop queue (only for perfect matching mode).
- */
-static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
-{
-	*fdirctrl = 0;
-
-	switch (conf->pballoc) {
-	case RTE_FDIR_PBALLOC_64K:
-		/* 8k - 1 signature filters */
-		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
-		break;
-	case RTE_FDIR_PBALLOC_128K:
-		/* 16k - 1 signature filters */
-		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
-		break;
-	case RTE_FDIR_PBALLOC_256K:
-		/* 32k - 1 signature filters */
-		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
-		break;
-	default:
-		/* bad value */
-		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
-		return -EINVAL;
-	}
-
-	/* status flags: write hash & swindex in the rx descriptor */
-	switch (conf->status) {
-	case RTE_FDIR_NO_REPORT_STATUS:
-		/* do nothing, default mode */
-		break;
-	case RTE_FDIR_REPORT_STATUS:
-		/* report status when the packet matches a fdir rule */
-		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-		break;
-	case RTE_FDIR_REPORT_STATUS_ALWAYS:
-		/* always report status */
-		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
-		break;
-	default:
-		/* bad value */
-		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
-		return -EINVAL;
-	}
-
-	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
-		     IXGBE_FDIRCTRL_FLEX_SHIFT;
-
-	if (conf->mode == RTE_FDIR_MODE_PERFECT) {
-		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
-		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-	}
-	/*
-	 * Continue setup of fdirctrl register bits:
-	 *  Set the maximum length per hash bucket to 0xA filters
-	 *  Send interrupt when 64 filters are left
-	 */
-	*fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
-		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
-
-	return 0;
-}
-
-/**
- * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
- *
- *  @hi_dword: Bits 31:16 mask to be bit swapped.
- *  @lo_dword: Bits 15:0  mask to be bit swapped.
- *
- *  Flow director uses several registers to store 2 x 16 bit masks with the
- *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
- *  mask affects the MS bit/byte of the target. This function reverses the
- *  bits in these masks.
- *  **/
-static inline uint32_t
-reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
-{
-	uint32_t mask = hi_dword << 16;
-	mask |= lo_dword;
-	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
-	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
-	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
-	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
-}
-
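As a sanity check on the masking above, a small self-contained snippet
(independent of the driver headers) that applies the same swap sequence; note
that each 16-bit half is bit-reversed in place, the two halves are not
exchanged:

#include <stdint.h>
#include <stdio.h>

static uint32_t reverse_halves(uint32_t m)
{
	m = ((m & 0x55555555) << 1) | ((m & 0xAAAAAAAA) >> 1);
	m = ((m & 0x33333333) << 2) | ((m & 0xCCCCCCCC) >> 2);
	m = ((m & 0x0F0F0F0F) << 4) | ((m & 0xF0F0F0F0) >> 4);
	return ((m & 0x00FF00FF) << 8) | ((m & 0xFF00FF00) >> 8);
}

int main(void)
{
	printf("%08x\n", reverse_halves(0x80000001)); /* -> 00018000 */
	printf("%08x\n", reverse_halves(0xFFFF00FF)); /* -> ffffff00 */
	return 0;
}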
-/*
- * This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c,
- * but makes use of the rte_fdir_masks structure to see which bits to set.
- */
-static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_masks *input_mask)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_fdir_info *info =
-			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	/*
-	 * mask VM pool and DIPv6 since they are currently not supported
-	 * mask FLEX byte; it will be set in flex_conf
-	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
-	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
-	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
-	uint16_t dst_ipv6m = 0;
-	uint16_t src_ipv6m = 0;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/*
-	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
-	 * are zero, then assume a full mask for that field. Also assume that
-	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
-	 * cannot be masked out in this implementation.
-	 */
-	if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
-		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
-		fdirm |= IXGBE_FDIRM_L4P;
-
-	if (input_mask->vlan_tci_mask == 0x0FFF)
-		/* mask VLAN Priority */
-		fdirm |= IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask == 0xE000)
-		/* mask VLAN ID */
-		fdirm |= IXGBE_FDIRM_VLANID;
-	else if (input_mask->vlan_tci_mask == 0)
-		/* mask VLAN ID and Priority */
-		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
-	else if (input_mask->vlan_tci_mask != 0xEFFF) {
-		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
-		return -EINVAL;
-	}
-	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
-
-	/* store the TCP/UDP port masks, bit reversed from port layout */
-	fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
-					 input_mask->src_port_mask);
-
-	/* write both the same so that UDP and TCP use the same mask */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
-	info->mask.src_port_mask = input_mask->src_port_mask;
-	info->mask.dst_port_mask = input_mask->dst_port_mask;
-
-	/* Store source and destination IPv4 masks (big-endian) */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, ~(input_mask->ipv4_mask.src_ip));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, ~(input_mask->ipv4_mask.dst_ip));
-	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
-	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
-
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
-		/*
-		 * IPv6 mask is only meaningful in signature mode
-		 * Store source and destination IPv6 masks (bit reversed)
-		 */
-		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
-		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
-		fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
-		info->mask.src_ipv6_mask = src_ipv6m;
-		info->mask.dst_ipv6_mask = dst_ipv6m;
-	}
-
-	return IXGBE_SUCCESS;
-}
-
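On the application side, the mask that fdir_set_input_mask_82599() consumes is
the one embedded in the port configuration. A hedged sketch using only fields
referenced above, where all-ones means "compare this field" and zero means
"ignore it" (the particular mask values are arbitrary):

#include <stdint.h>
#include <rte_ethdev.h>

static void example_fdir_masks(struct rte_eth_conf *conf)
{
	struct rte_eth_fdir_masks *mask = &conf->fdir_conf.mask;

	mask->vlan_tci_mask = 0x0FFF;        /* match VLAN ID, ignore priority */
	mask->ipv4_mask.src_ip = UINT32_MAX; /* match the full source address */
	mask->ipv4_mask.dst_ip = UINT32_MAX; /* match the full destination address */
	mask->src_port_mask = 0;             /* ignore the source port */
	mask->dst_port_mask = UINT16_MAX;    /* match the destination port */
}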
-/*
 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask configuration
 * arguments are valid and program them
- */
-static int
-ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
-		const struct rte_eth_fdir_flex_conf *conf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_fdir_info *info =
-			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	const struct rte_eth_flex_payload_cfg *flex_cfg;
-	const struct rte_eth_fdir_flex_mask *flex_mask;
-	uint32_t fdirctrl, fdirm;
-	uint16_t flexbytes = 0;
-	uint16_t i;
-
-	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
-	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
-
-	if (conf == NULL) {
-		PMD_DRV_LOG(INFO, "NULL pointer.");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < conf->nb_payloads; i++) {
-		flex_cfg = &conf->flex_set[i];
-		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
-			PMD_DRV_LOG(ERR, "unsupported payload type.");
-			return -EINVAL;
-		}
-		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
-		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
-		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
-			fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
-			fdirctrl |= (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
-					IXGBE_FDIRCTRL_FLEX_SHIFT;
-		} else {
-			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
-			return -EINVAL;
-		}
-	}
-
-	for (i = 0; i < conf->nb_flexmasks; i++) {
-		flex_mask = &conf->flex_mask[i];
-		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
-			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
-			return -EINVAL;
-		}
-		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
-					((flex_mask->mask[1]) & 0xFF));
-		if (flexbytes == UINT16_MAX)
-			fdirm &= ~IXGBE_FDIRM_FLEX;
-		else if (flexbytes != 0) {
-			/* IXGBE_FDIRM_FLEX is set by default when setting the mask */
-			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
-			return -EINVAL;
-		}
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
-	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
-	info->flex_bytes_offset = (uint8_t)((fdirctrl &
-					    IXGBE_FDIRCTRL_FLEX_MASK) >>
-					    IXGBE_FDIRCTRL_FLEX_SHIFT);
-	return 0;
-}
-
-int
-ixgbe_fdir_configure(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int err;
-	uint32_t fdirctrl, pbsize;
-	int i;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (hw->mac.type != ixgbe_mac_82599EB &&
-		hw->mac.type != ixgbe_mac_X540 &&
-		hw->mac.type != ixgbe_mac_X550 &&
-		hw->mac.type != ixgbe_mac_X550EM_x)
-		return -ENOSYS;
-
-	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
-	if (err)
-		return err;
-
-	/*
-	 * Before enabling Flow Director, the Rx Packet Buffer size
-	 * must be reduced.  The new value is the current size minus
-	 * flow director memory usage size.
-	 */
-	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
-	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
-	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
-	/*
-	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * initialized to zero for non-DCB mode, otherwise the actual total RX PB
-	 * would be bigger than programmed and filter space would run into
-	 * the PB 0 region.
-	 */
-	for (i = 1; i < 8; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
-	err = fdir_set_input_mask_82599(dev, &dev->data->dev_conf.fdir_conf.mask);
-	if (err < 0) {
-		PMD_INIT_LOG(ERR, " Error on setting FD mask");
-		return err;
-	}
-	err = ixgbe_set_fdir_flex_conf(dev,
-		&dev->data->dev_conf.fdir_conf.flex_conf);
-	if (err < 0) {
-		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
-		return err;
-	}
-
-	err = fdir_enable_82599(hw, fdirctrl);
-	if (err < 0) {
-		PMD_INIT_LOG(ERR, " Error on enabling FD.");
-		return err;
-	}
-	return 0;
-}
-
-/*
- * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
- * by the IXGBE driver code.
- */
-static int
-ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
-		union ixgbe_atr_input *input)
-{
-	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
-	input->formatted.flex_bytes = (uint16_t)(
-		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
-		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
-
-	switch (fdir_filter->input.flow_type) {
-	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
-		break;
-	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, " Error on flow_type input");
-		return -EINVAL;
-	}
-
-	switch (fdir_filter->input.flow_type) {
-	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
-	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
-		input->formatted.src_port =
-			fdir_filter->input.flow.udp4_flow.src_port;
-		input->formatted.dst_port =
-			fdir_filter->input.flow.udp4_flow.dst_port;
-	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
-	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
-	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-		input->formatted.src_ip[0] =
-			fdir_filter->input.flow.ip4_flow.src_ip;
-		input->formatted.dst_ip[0] =
-			fdir_filter->input.flow.ip4_flow.dst_ip;
-		break;
-
-	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
-		input->formatted.src_port =
-			fdir_filter->input.flow.udp6_flow.src_port;
-		input->formatted.dst_port =
-			fdir_filter->input.flow.udp6_flow.dst_port;
-	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
-	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
-	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-		rte_memcpy(input->formatted.src_ip,
-			   fdir_filter->input.flow.ipv6_flow.src_ip,
-			   sizeof(input->formatted.src_ip));
-		rte_memcpy(input->formatted.dst_ip,
-			   fdir_filter->input.flow.ipv6_flow.dst_ip,
-			   sizeof(input->formatted.dst_ip));
-		break;
-	default:
-		PMD_DRV_LOG(ERR, " Error on flow_type input");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
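Going the other way, a hedged sketch of an application filling in
rte_eth_fdir_filter for an IPv4/UDP flow. Only fields read by the conversion
above are set, the IP/UDP fields are written through the same union members
the driver reads, the values are in network byte order as the register writes
below suggest, and the ACCEPT behavior is assumed to sit alongside the
RTE_ETH_FDIR_REJECT value used later in this file:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>

static void example_udp4_filter(struct rte_eth_fdir_filter *f)
{
	memset(f, 0, sizeof(*f));
	f->soft_id = 1;                       /* folded into the hash in perfect mode */
	f->input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	/* ip4_flow overlays the start of udp4_flow in the flow union */
	f->input.flow.ip4_flow.src_ip = rte_cpu_to_be_32(0xc0a80001); /* 192.168.0.1 */
	f->input.flow.ip4_flow.dst_ip = rte_cpu_to_be_32(0xc0a80002); /* 192.168.0.2 */
	f->input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
	f->input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4096);
	f->action.behavior = RTE_ETH_FDIR_ACCEPT;
	f->action.rx_queue = 3;               /* steer matches to Rx queue 3 */
}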
-/*
- * The below function is taken from the FreeBSD IXGBE drivers release
- * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
- * before returning, as the signature hash can use 16bits.
- *
- * The newer driver has optimised functions for calculating bucket and
- * signature hashes. However they don't support IPv6 type packets for signature
- * filters so are not used here.
- *
- * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
- * set.
- *
- * Compute the hashes for SW ATR
- *  @stream: input bitstream to compute the hash on
- *  @key: 32-bit hash key
- **/
-static uint32_t
-ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
-				 uint32_t key)
-{
-	/*
-	 * The algorithm is as follows:
-	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
-	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
-	 *    and A[n] x B[n] is bitwise AND between same length strings
-	 *
-	 *    K[n] is 16 bits, defined as:
-	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
-	 *       for n modulo 32 < 15, K[n] =
-	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
-	 *
-	 *    S[n] is 16 bits, defined as:
-	 *       for n >= 15, S[n] = S[n:n - 15]
-	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
-	 *
-	 *    To simplify for programming, the algorithm is implemented
-	 *    in software this way:
-	 *
-	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
-	 *
-	 *    for (i = 0; i < 352; i+=32)
-	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
-	 *
-	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
-	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
-	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
-	 *
-	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
-	 *
-	 *    if(key[0])
-	 *        hash[15:0] ^= Stream[15:0];
-	 *
-	 *    for (i = 0; i < 16; i++) {
-	 *        if (key[i])
-	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
-	 *        if (key[i + 16])
-	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
-	 *    }
-	 *
-	 */
-	__be32 common_hash_dword = 0;
-	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-	u32 hash_result = 0;
-	u8 i;
-
-	/* record the flow_vm_vlan bits as they are a key part to the hash */
-	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
-
-	/* generate common hash dword */
-	for (i = 1; i <= 13; i++)
-		common_hash_dword ^= atr_input->dword_stream[i];
-
-	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
-
-	/* low dword is word swapped version of common */
-	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-
-	/* apply flow ID/VM pool/VLAN ID bits to hash words */
-	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
-
-	/* Process bits 0 and 16 */
-	if (key & 0x0001) hash_result ^= lo_hash_dword;
-	if (key & 0x00010000) hash_result ^= hi_hash_dword;
-
-	/*
-	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
-	 * delay this because bit 0 of the stream should not be processed
-	 * so we do not add the vlan until after bit 0 was processed
-	 */
-	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
-
-
-	/* process the remaining 30 bits in the key 2 bits at a time */
-	for (i = 15; i; i-- ) {
-		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
-		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
-	}
-
-	return hash_result;
-}
-
-static uint32_t
-atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
-{
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
-		return ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				PERFECT_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
-		return ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				PERFECT_BUCKET_128KB_HASH_MASK;
-	else
-		return ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				PERFECT_BUCKET_64KB_HASH_MASK;
-}
-
-/**
- * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
- * @hw: pointer to hardware structure
- */
-static inline int
-ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
-{
-	int i;
-
-	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
-		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
-		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
-			return 0;
-		rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
-	}
-
-	return -ETIMEDOUT;
-}
-
-/*
- * Calculate the hash value needed for signature-match filters. In the FreeBSD
- * driver, this is done by the optimised function
- * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
- * doesn't support calculating a hash for an IPv6 filter.
- */
-static uint32_t
-atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
-		enum rte_fdir_pballoc_type pballoc)
-{
-	uint32_t bucket_hash, sig_hash;
-
-	if (pballoc == RTE_FDIR_PBALLOC_256K)
-		bucket_hash = ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				SIG_BUCKET_256KB_HASH_MASK;
-	else if (pballoc == RTE_FDIR_PBALLOC_128K)
-		bucket_hash = ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				SIG_BUCKET_128KB_HASH_MASK;
-	else
-		bucket_hash = ixgbe_atr_compute_hash_82599(input,
-				IXGBE_ATR_BUCKET_HASH_KEY) &
-				SIG_BUCKET_64KB_HASH_MASK;
-
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-			IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
-}
-
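The resulting value is laid out for the FDIRHASH register, signature hash in
the upper half and bucket hash in the lower half. A rough illustration; the
shift of 16 is an assumption standing in for IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT
rather than a value taken from this patch:

#include <stdint.h>

static uint32_t example_fdirhash(uint16_t sig_hash, uint16_t bucket_hash)
{
	const unsigned sig_shift = 16; /* assumed SIG_SW_INDEX shift */

	return ((uint32_t)sig_hash << sig_shift) | bucket_hash;
}
/* example_fdirhash(0xBEEF, 0x1234) == 0xBEEF1234 */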
-/*
- * This is based on ixgbe_fdir_write_perfect_filter_82599() in
- * ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
- * added, and IPv6 support also added. The hash value is also pre-calculated
- * as the pballoc value is needed to do it.
- */
-static int
-fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-			union ixgbe_atr_input *input, uint8_t queue,
-			uint32_t fdircmd, uint32_t fdirhash)
-{
-	uint32_t fdirport, fdirvlan;
-	int err = 0;
-
-	/* record the IPv4 address (big-endian) */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
-	/* record source and destination port (little-endian)*/
-	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
-	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-
-	/* record vlan (little-endian) and flex_bytes(big-endian) */
-	fdirvlan = input->formatted.flex_bytes;
-	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
-	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
-
-	/* configure FDIRHASH register */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
-	/*
-	 * flush all previous writes to make certain registers are
-	 * programmed prior to issuing the command
-	 */
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* configure FDIRCMD register */
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
-		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
-	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-
-	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
-
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err < 0)
-		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
-
-	return err;
-}
-
-/**
- * This function is based on ixgbe_atr_add_signature_filter_82599() in
- * ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
- * setting extra fields in the FDIRCMD register, and removes the code that was
- * verifying the flow_type field. According to the documentation, a flow type of
 * 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, it appears to
 * work OK in practice.
- *
- *  Adds a signature hash filter
- *  @hw: pointer to hardware structure
- *  @input: unique input dword
- *  @queue: queue index to direct traffic to
- *  @fdircmd: any extra flags to set in fdircmd register
- *  @fdirhash: pre-calculated hash value for the filter
- **/
-static int
-fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
-		uint32_t fdirhash)
-{
-	int err = 0;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* configure FDIRCMD register */
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
-	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
-	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
-
-	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
-
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err < 0)
-		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
-
-	return err;
-}
-
-/*
- * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
- * ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so
- * that it can be used for removing signature and perfect filters.
- */
-static int
-fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
-{
-	uint32_t fdircmd = 0;
-	int err = 0;
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-
-	/* flush hash to HW */
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* Query if filter is present */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
-
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err < 0) {
-		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
-		return err;
-	}
-
-	/* if filter exists in hardware then remove it */
-	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
-		IXGBE_WRITE_FLUSH(hw);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
-				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
-	}
-	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
-	if (err < 0)
-		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
-	return err;
-
-}
-
-/*
 * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
-static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
-			      const struct rte_eth_fdir_filter *fdir_filter,
-			      bool del,
-			      bool update)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t fdircmd_flags;
-	uint32_t fdirhash;
-	union ixgbe_atr_input input;
-	uint8_t queue;
-	bool is_perfect = FALSE;
-	int err;
-
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
-		return -ENOTSUP;
-
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
-		is_perfect = TRUE;
-
-	memset(&input, 0, sizeof(input));
-
-	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input);
-	if (err)
-		return err;
-
-	if (is_perfect) {
-		if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
-					 " perfect mode!");
-			return -ENOTSUP;
-		}
-		fdirhash = atr_compute_perfect_hash_82599(&input,
-				dev->data->dev_conf.fdir_conf.pballoc);
-		fdirhash |= fdir_filter->soft_id <<
-				IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-	} else
-		fdirhash = atr_compute_sig_hash_82599(&input,
-				dev->data->dev_conf.fdir_conf.pballoc);
-
-	if (del) {
-		err = fdir_erase_filter_82599(hw, fdirhash);
-		if (err < 0)
-			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
-		else
-			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
-		return err;
-	}
-	/* add or update an fdir filter*/
-	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
-	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
-		if (is_perfect) {
-			queue = dev->data->dev_conf.fdir_conf.drop_queue;
-			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
-		} else {
-			PMD_DRV_LOG(ERR, "Drop option is not supported in"
-				" signature mode.");
-			return -EINVAL;
-		}
-	} else if (fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
-		queue = (uint8_t)fdir_filter->action.rx_queue;
-	else
-		return -EINVAL;
-
-	if (is_perfect) {
-		err = fdir_write_perfect_filter_82599(hw, &input, queue,
-				fdircmd_flags, fdirhash);
-	} else {
-		err = fdir_add_signature_filter_82599(hw, &input, queue,
-				fdircmd_flags, fdirhash);
-	}
-	if (err < 0)
-		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
-	else
-		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
-
-	return err;
-}
-
-static int
-ixgbe_fdir_flush(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_fdir_info *info =
-			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	int ret;
-
-	ret = ixgbe_reinit_fdir_tables_82599(hw);
-	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
-		return ret;
-	}
-
-	info->f_add = 0;
-	info->f_remove = 0;
-	info->add = 0;
-	info->remove = 0;
-
-	return ret;
-}
-
-#define FDIRENTRIES_NUM_SHIFT 10
-static void
-ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_fdir_info *info =
-			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	uint32_t fdirctrl, max_num;
-	uint8_t offset;
-
-	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
-	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
-			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
-
-	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
-	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
-			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
-	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
-		fdir_info->guarant_spc = max_num;
-	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
-		fdir_info->guarant_spc = max_num * 4;
-
-	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
-	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
-	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
-	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
-			fdir_info->mask.ipv6_mask.src_ip);
-	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
-			fdir_info->mask.ipv6_mask.dst_ip);
-	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
-	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
-	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
-	fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
-	fdir_info->flex_payload_unit = sizeof(uint16_t);
-	fdir_info->max_flex_payload_segment_num = 1;
-	fdir_info->flex_payload_limit = 62;
-	fdir_info->flex_conf.nb_payloads = 1;
-	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
-	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
-	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
-	fdir_info->flex_conf.nb_flexmasks = 1;
-	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
-	fdir_info->flex_conf.flex_mask[0].mask[0] =
-			(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
-	fdir_info->flex_conf.flex_mask[0].mask[1] =
-			(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
-}
-
-static void
-ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_hw_fdir_info *info =
-			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
-	uint32_t reg, max_num;
-
-	/* Get the information from registers */
-	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
-	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
-					IXGBE_FDIRFREE_COLL_SHIFT);
-	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
-				   IXGBE_FDIRFREE_FREE_SHIFT);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
-	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
-				      IXGBE_FDIRLEN_MAXHASH_SHIFT);
-	info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
-				     IXGBE_FDIRLEN_MAXLEN_SHIFT);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
-	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
-		IXGBE_FDIRUSTAT_REMOVE_SHIFT;
-	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
-		IXGBE_FDIRUSTAT_ADD_SHIFT;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
-	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
-		IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
-	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
-		IXGBE_FDIRFSTAT_FADD_SHIFT;
-
-	/*  Copy the new information in the fdir parameter */
-	fdir_stats->collision = info->collision;
-	fdir_stats->free = info->free;
-	fdir_stats->maxhash = info->maxhash;
-	fdir_stats->maxlen = info->maxlen;
-	fdir_stats->remove = info->remove;
-	fdir_stats->add = info->add;
-	fdir_stats->f_remove = info->f_remove;
-	fdir_stats->f_add = info->f_add;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
-	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
-			(reg & FDIRCTRL_PBALLOC_MASK)));
-	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
-			fdir_stats->guarant_cnt = max_num - fdir_stats->free;
-	else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
-		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
-
-}
-
-/*
- * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
- * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation to be taken
- * @arg: a pointer to specific structure corresponding to the filter_op
- */
-int
-ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
-			enum rte_filter_op filter_op, void *arg)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret = 0;
-
-	if (hw->mac.type != ixgbe_mac_82599EB &&
-		hw->mac.type != ixgbe_mac_X540 &&
-		hw->mac.type != ixgbe_mac_X550 &&
-		hw->mac.type != ixgbe_mac_X550EM_x)
-		return -ENOTSUP;
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
-		return -EINVAL;
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_add_del_fdir_filter(dev,
-			(struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
-		break;
-	case RTE_ETH_FILTER_UPDATE:
-		ret = ixgbe_add_del_fdir_filter(dev,
-			(struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_add_del_fdir_filter(dev,
-			(struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
-		break;
-	case RTE_ETH_FILTER_FLUSH:
-		ret = ixgbe_fdir_flush(dev);
-		break;
-	case RTE_ETH_FILTER_INFO:
-		ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
-		break;
-	case RTE_ETH_FILTER_STATS:
-		ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
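For reference, these operations are normally reached from an application
through the generic filter-control API rather than called directly; a short
sketch with error handling trimmed:

#include <rte_ethdev.h>

static int example_fdir_calls(uint8_t port_id, struct rte_eth_fdir_filter *filter)
{
	struct rte_eth_fdir_stats stats;
	int ret;

	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				      RTE_ETH_FILTER_ADD, filter);
	if (ret < 0)
		return ret;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_STATS, &stats);
}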
diff --git a/lib/librte_pmd_ixgbe/ixgbe_logs.h b/lib/librte_pmd_ixgbe/ixgbe_logs.h
deleted file mode 100644
index 572e030..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_logs.h
+++ /dev/null
@@ -1,78 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_LOGS_H_
-#define _IXGBE_LOGS_H_
-
-#define PMD_INIT_LOG(level, fmt, args...) \
-	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
-		"PMD: %s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
-#endif
-
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
-	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
-	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
-#endif /* _IXGBE_LOGS_H_ */
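Note that only PMD_INIT_LOG() is unconditional; the function-trace, Rx/Tx and
driver macros compile away unless the matching RTE_LIBRTE_IXGBE_DEBUG_* option
is enabled in the build configuration. A couple of hypothetical call sites,
assuming this header is included:

static void example_logging(unsigned port_id, int link_up)
{
	PMD_INIT_LOG(ERR, "init failed on port %u", port_id);
	/* compiled out unless RTE_LIBRTE_IXGBE_DEBUG_DRIVER is set */
	PMD_DRV_LOG(INFO, "link is %s", link_up ? "up" : "down");
}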
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
deleted file mode 100644
index dbda9b5..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ /dev/null
@@ -1,629 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-
-#include <rte_interrupts.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_memcpy.h>
-#include <rte_malloc.h>
-#include <rte_random.h>
-
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe_ethdev.h"
-
-#define IXGBE_MAX_VFTA     (128)
-#define IXGBE_VF_MSG_SIZE_DEFAULT 1
-#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
-
-static inline uint16_t
-dev_num_vf(struct rte_eth_dev *eth_dev)
-{
-	return eth_dev->pci_dev->max_vfs;
-}
-
-static inline
-int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
-{
-	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	uint16_t vfn;
-
-	for (vfn = 0; vfn < vf_num; vfn++) {
-		eth_random_addr(vf_mac_addr);
-		/* keep the random address as default */
-		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
-			   ETHER_ADDR_LEN);
-	}
-
-	return 0;
-}
-
-static inline int
-ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
-{
-	struct ixgbe_interrupt *intr =
-		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-	intr->mask |= IXGBE_EICR_MAILBOX;
-
-	return 0;
-}
-
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
-{
-	struct ixgbe_vf_info **vfinfo =
-		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-	struct ixgbe_mirror_info *mirror_info =
-		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
-	struct ixgbe_uta_info *uta_info =
-		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	uint16_t vf_num;
-	uint8_t nb_queue;
-
-	PMD_INIT_FUNC_TRACE();
-
-	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
-	if (0 == (vf_num = dev_num_vf(eth_dev)))
-		return;
-
-	*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
-	if (*vfinfo == NULL)
-		rte_panic("Cannot allocate memory for private VF data\n");
-
-	memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
-	memset(uta_info,0,sizeof(struct ixgbe_uta_info));
-	hw->mac.mc_filter_type = 0;
-
-	if (vf_num >= ETH_32_POOLS) {
-		nb_queue = 2;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
-	} else if (vf_num >= ETH_16_POOLS) {
-		nb_queue = 4;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
-	} else {
-		nb_queue = 8;
-		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
-	}
-
-	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
-	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
-	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
-
-	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
-
-	/* init_mailbox_params */
-	hw->mbx.ops.init_params(hw);
-
-	/* set mb interrupt mask */
-	ixgbe_mb_intr_setup(eth_dev);
-
-	return;
-}
-
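Working one case through the pool/queue split above (the VF count is
illustrative):

/*
 * vf_num = 20: 16 <= 20 < 32, so the ETH_32_POOLS branch is taken.
 *   nb_queue       = 4   (queues per pool)
 *   def_vmdq_idx   = 20  (the PF uses the pool right after the VFs)
 *   def_pool_q_idx = 80  (PF queues start after the 20 * 4 VF queues)
 */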
-int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
-{
-	uint32_t vtctl, fcrth;
-	uint32_t vfre_slot, vfre_offset;
-	uint16_t vf_num;
-	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
-	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	uint32_t gpie, gcr_ext;
-	uint32_t vlanctrl;
-	int i;
-
-	if (0 == (vf_num = dev_num_vf(eth_dev)))
-		return -1;
-
-	/* enable VMDq and set the default pool for PF */
-	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
-	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
-		<< IXGBE_VT_CTL_POOL_SHIFT;
-	vtctl |= IXGBE_VT_CTL_REPLEN;
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
-
-	vfre_offset = vf_num & VFRE_MASK;
-	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
-
-	/* Enable pools reserved to PF only */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
-
-	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-
-	/* clear VMDq map to permanent RAR 0 */
-	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
-
-	/* clear VMDq map to scan rar 127 */
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);
-
-	/* set VMDq map to default PF pool */
-	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
-
-	/*
-	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
-	 */
-	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
-	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
-
-	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	gpie |= IXGBE_GPIE_MSIX_MODE;
-
-	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
-	case ETH_64_POOLS:
-		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
-		gpie |= IXGBE_GPIE_VTMODE_64;
-		break;
-	case ETH_32_POOLS:
-		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
-		gpie |= IXGBE_GPIE_VTMODE_32;
-		break;
-	case ETH_16_POOLS:
-		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
-		gpie |= IXGBE_GPIE_VTMODE_16;
-		break;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
-	/*
-	 * enable vlan filtering and allow all vlan tags through
-	 */
-	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-
-	/* VFTA - enable all vlan filters */
-	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-	}
-
-	/* Enable MAC Anti-Spoofing */
-	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
-
-	/* set flow control threshold to max to avoid tx switch hang */
-	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
-		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
-	}
-
-	return 0;
-}
-
-static void
-set_rx_mode(struct rte_eth_dev *dev)
-{
-	struct rte_eth_dev_data *dev_data =
-		(struct rte_eth_dev_data*)dev->data->dev_private;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
-	uint16_t vfn = dev_num_vf(dev);
-
-	/* Check for Promiscuous and All Multicast modes */
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-
-	/* set all bits that we expect to always be set */
-	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
-	fctrl |= IXGBE_FCTRL_BAM;
-
-	/* clear the bits we are changing the status of */
-	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
-	if (dev_data->promiscuous) {
-		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
-	} else {
-		if (dev_data->all_multicast) {
-			fctrl |= IXGBE_FCTRL_MPE;
-			vmolr |= IXGBE_VMOLR_MPE;
-		} else {
-			vmolr |= IXGBE_VMOLR_ROMPE;
-		}
-	}
-
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
-			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
-			   IXGBE_VMOLR_ROPE);
-		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
-}
-
-static inline void
-ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-
-	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
-			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-
-	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
-
-	/* reset multicast table array for vf */
-	vfinfo[vf].num_vf_mc_hashes = 0;
-
-	/* reset rx mode */
-	set_rx_mode(dev);
-
-	hw->mac.ops.clear_rar(hw, rar_entry);
-}
-
-static inline void
-ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t reg;
-	uint32_t reg_offset, vf_shift;
-	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
-	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
-
-	vf_shift = vf & VFRE_MASK;
-	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
-
-	/* enable transmit and receive for vf */
-	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
-	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
-
-	/* Enable counting of spoofed packets in the SSVPC register */
-	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
-	reg |= (1 << vf_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
-
-	ixgbe_vf_reset_event(dev, vf);
-}
-
-static int
-ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
-	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
-
-	ixgbe_vf_reset_msg(dev, vf);
-
-	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
-
-	/* reply to reset with ack and vf mac address */
-	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
-	/*
-	 * Piggyback the multicast filter type so VF can compute the
-	 * correct vectors
-	 */
-	msgbuf[3] = hw->mac.mc_filter_type;
-	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
-	return 0;
-}
-
-static int
-ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
-
-	if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
-		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
-		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
-	}
-	return -1;
-}
-
-static int
-ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
-		IXGBE_VT_MSGINFO_SHIFT;
-	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
-	uint32_t mta_idx;
-	uint32_t mta_shift;
-	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
-	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
-	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
-	uint32_t reg_val;
-	int i;
-
-	/* only so many hash values supported */
-	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
-
-	/* store the mc entries  */
-	vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
-	for (i = 0; i < nb_entries; i++) {
-		vfinfo->vf_mc_hashes[i] = hash_list[i];
-	}
-
-	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
-		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
-				& IXGBE_MTA_INDEX_MASK;
-		mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
-		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
-		reg_val |= (1 << mta_shift);
-		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
-	}
-
-	return 0;
-}
-
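A worked example of the MTA indexing above, using the same constants (index
mask 0x7F, bit shift 5); the hash value itself is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hash = 0x0ABC;                      /* sample VF multicast hash */
	uint32_t mta_idx = (hash >> 5) & 0x7F;       /* -> 0x55, register index  */
	uint32_t mta_shift = hash & ((1u << 5) - 1); /* -> 28, bit within it     */

	printf("MTA[0x%02x] |= 1 << %u\n", mta_idx, mta_shift);
	return 0;
}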
-static int
-ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
-{
-	int add, vid;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-
-	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-		>> IXGBE_VT_MSGINFO_SHIFT;
-	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
-
-	if (add)
-		vfinfo[vf].vlan_count++;
-	else if (vfinfo[vf].vlan_count)
-		vfinfo[vf].vlan_count--;
-	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
-}
-
-static int
-ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t new_mtu = msgbuf[1];
-	uint32_t max_frs;
-	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-
-	/* X540 and X550 support jumbo frames in IOV mode */
-	if (hw->mac.type != ixgbe_mac_X540 &&
-		hw->mac.type != ixgbe_mac_X550 &&
-		hw->mac.type != ixgbe_mac_X550EM_x)
-		return -1;
-
-	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
-		return -1;
-
-	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
-		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
-	if (max_frs < new_mtu) {
-		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
-		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
-{
-	uint32_t api_version = msgbuf[1];
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-
-	switch (api_version) {
-	case ixgbe_mbox_api_10:
-	case ixgbe_mbox_api_11:
-		vfinfo[vf].api_version = (uint8_t)api_version;
-		return 0;
-	default:
-		break;
-	}
-
-	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
-		api_version, vf);
-
-	return -1;
-}
-
-static int
-ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
-{
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-
-	/* Verify whether the PF supports this mbox API version */
-	switch (vfinfo[vf].api_version) {
-	case ixgbe_mbox_api_20:
-	case ixgbe_mbox_api_11:
-		break;
-	default:
-		return -1;
-	}
-
-	/* Notify VF of Rx and Tx queue number */
-	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-
-	/* Notify VF of default queue */
-	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
-
-	/*
-	 * FIX ME if it needs fill msgbuf[IXGBE_VF_TRANS_VLAN]
-	 * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
-	 */
-
-	return 0;
-}
-
-static int
-ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
-{
-	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
-	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
-	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
-	int32_t retval;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-
-	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
-	if (retval) {
-		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
-		return retval;
-	}
-
-	/* do nothing if the message has already been processed */
-	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
-		return retval;
-
-	/* flush the ack before we write any messages back */
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* perform VF reset */
-	if (msgbuf[0] == IXGBE_VF_RESET) {
-		int ret = ixgbe_vf_reset(dev, vf, msgbuf);
-		vfinfo[vf].clear_to_send = true;
-		return ret;
-	}
-
-	/* check & process VF to PF mailbox message */
-	switch ((msgbuf[0] & 0xFFFF)) {
-	case IXGBE_VF_SET_MAC_ADDR:
-		retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
-		break;
-	case IXGBE_VF_SET_MULTICAST:
-		retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
-		break;
-	case IXGBE_VF_SET_LPE:
-		retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
-		break;
-	case IXGBE_VF_SET_VLAN:
-		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
-		break;
-	case IXGBE_VF_API_NEGOTIATE:
-		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
-		break;
-	case IXGBE_VF_GET_QUEUES:
-		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
-		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
-		break;
-	default:
-		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
-		retval = IXGBE_ERR_MBX;
-		break;
-	}
-
-	/* respond to the VF according to the message processing result */
-	if (retval)
-		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
-	else
-		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
-
-	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
-
-	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
-
-	return retval;
-}
-
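For orientation, the word being dispatched on carries the command in its low
16 bits, and the reply reuses the same word with ACK/NACK and CTS OR'd in. A
hedged sketch of that composition, assuming the IXGBE_VT_MSGTYPE_* macros from
the mailbox header:

static uint32_t example_reply_word(uint32_t request, int success)
{
	uint32_t reply = request & 0xFFFF; /* keep the original command */

	reply |= success ? IXGBE_VT_MSGTYPE_ACK : IXGBE_VT_MSGTYPE_NACK;
	reply |= IXGBE_VT_MSGTYPE_CTS;     /* PF is ready for further messages */
	return reply;
}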
-static inline void
-ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
-{
-	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo =
-		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-
-	if (!vfinfo[vf].clear_to_send)
-		ixgbe_write_mbx(hw, &msg, 1, vf);
-}
-
-void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
-{
-	uint16_t vf;
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-
-	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
-		/* check & process vf function level reset */
-		if (!ixgbe_check_for_rst(hw, vf))
-			ixgbe_vf_reset_event(eth_dev, vf);
-
-		/* check & process vf mailbox messages */
-		if (!ixgbe_check_for_msg(hw, vf))
-			ixgbe_rcv_msg_from_vf(eth_dev, vf);
-
-		/* check & process acks from vf */
-		if (!ixgbe_check_for_ack(hw, vf))
-			ixgbe_rcv_ack_from_vf(eth_dev, vf);
-	}
-}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
deleted file mode 100644
index 57c9430..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ /dev/null
@@ -1,4780 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   Copyright 2014 6WIND S.A.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/queue.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <stdint.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <rte_byteorder.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_prefetch.h>
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_string_fns.h>
-#include <rte_errno.h>
-#include <rte_ip.h>
-
-#include "ixgbe_logs.h"
-#include "ixgbe/ixgbe_api.h"
-#include "ixgbe/ixgbe_vf.h"
-#include "ixgbe_ethdev.h"
-#include "ixgbe/ixgbe_dcb.h"
-#include "ixgbe/ixgbe_common.h"
-#include "ixgbe_rxtx.h"
-
-/* Bit Mask to indicate what bits required for building TX context */
-#define IXGBE_TX_OFFLOAD_MASK (			 \
-		PKT_TX_VLAN_PKT |		 \
-		PKT_TX_IP_CKSUM |		 \
-		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return (m);
-}
-
-
-#if 1
-#define RTE_PMD_USE_PREFETCH
-#endif
-
-#ifdef RTE_PMD_USE_PREFETCH
-/*
- * Prefetch a cache line into all cache levels.
- */
-#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
-#else
-#define rte_ixgbe_prefetch(p)   do {} while(0)
-#endif
-
-/*********************************************************************
- *
- *  TX functions
- *
- **********************************************************************/
-
-/*
- * Check for descriptors with their DD bit set and free mbufs.
- * Return the total number of buffers freed.
- */
-static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
-{
-	struct ixgbe_tx_entry *txep;
-	uint32_t status;
-	int i;
-
-	/* check DD bit on threshold descriptor */
-	status = txq->tx_ring[txq->tx_next_dd].wb.status;
-	if (! (status & IXGBE_ADVTXD_STAT_DD))
-		return 0;
-
-	/*
-	 * first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
-
-	/* free buffers one at a time */
-	if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
-		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
-			txep->mbuf->next = NULL;
-			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
-			txep->mbuf = NULL;
-		}
-	} else {
-		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
-			rte_pktmbuf_free_seg(txep->mbuf);
-			txep->mbuf = NULL;
-		}
-	}
-
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
-}
-
-/* Populate 4 descriptors with data from 4 mbufs */
-static inline void
-tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
-{
-	uint64_t buf_dma_addr;
-	uint32_t pkt_len;
-	int i;
-
-	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-		pkt_len = (*pkts)->data_len;
-
-		/* write data to descriptor */
-		txdp->read.buffer_addr = buf_dma_addr;
-		txdp->read.cmd_type_len =
-				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
-		txdp->read.olinfo_status =
-				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-		rte_prefetch0(&(*pkts)->pool);
-	}
-}
-
-/* Populate 1 descriptor with data from 1 mbuf */
-static inline void
-tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
-{
-	uint64_t buf_dma_addr;
-	uint32_t pkt_len;
-
-	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-	pkt_len = (*pkts)->data_len;
-
-	/* write data to descriptor */
-	txdp->read.buffer_addr = buf_dma_addr;
-	txdp->read.cmd_type_len =
-			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
-	txdp->read.olinfo_status =
-			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-	rte_prefetch0(&(*pkts)->pool);
-}
-
-/*
- * Fill H/W descriptor ring with mbuf data.
- * Copy mbuf pointers to the S/W ring.
- */
-static inline void
-ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
-		      uint16_t nb_pkts)
-{
-	volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
-	struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
-	const int N_PER_LOOP = 4;
-	const int N_PER_LOOP_MASK = N_PER_LOOP-1;
-	int mainpart, leftover;
-	int i, j;
-
-	/*
-	 * Process most of the packets in chunks of N pkts.  Any
-	 * leftover packets will get processed one at a time.
-	 */
-	mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
-	leftover = (nb_pkts & ((uint32_t)  N_PER_LOOP_MASK));
-	for (i = 0; i < mainpart; i += N_PER_LOOP) {
-		/* Copy N mbuf pointers to the S/W ring */
-		for (j = 0; j < N_PER_LOOP; ++j) {
-			(txep + i + j)->mbuf = *(pkts + i + j);
-		}
-		tx4(txdp + i, pkts + i);
-	}
-
-	if (unlikely(leftover > 0)) {
-		for (i = 0; i < leftover; ++i) {
-			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
-			tx1(txdp + mainpart + i, pkts + mainpart + i);
-		}
-	}
-}
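
(For illustration, a minimal standalone sketch of the chunk-of-4 split used by ixgbe_tx_fill_hw_ring() above; plain C, hypothetical names, not part of the driver. It only shows the mainpart/leftover index arithmetic, not the descriptor writes.)

#include <stdint.h>
#include <stdio.h>

/* Split a burst into a multiple-of-4 main part and a remainder,
 * mirroring the N_PER_LOOP logic above. */
static void
split_burst(uint16_t nb_pkts)
{
	const uint16_t n_per_loop = 4;
	uint16_t mainpart = (uint16_t)(nb_pkts & ~(n_per_loop - 1));
	uint16_t leftover = (uint16_t)(nb_pkts & (n_per_loop - 1));

	printf("pkts=%u -> main=%u leftover=%u\n",
	       (unsigned)nb_pkts, (unsigned)mainpart, (unsigned)leftover);
}

int
main(void)
{
	split_burst(7);		/* main=4, leftover=3 */
	split_burst(32);	/* main=32, leftover=0 */
	return 0;
}
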
-
-static inline uint16_t
-tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-	     uint16_t nb_pkts)
-{
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
-	volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
-	uint16_t n = 0;
-
-	/*
-	 * Begin scanning the H/W ring for done descriptors when the
-	 * number of available descriptors drops below tx_free_thresh.  For
-	 * each done descriptor, free the associated buffer.
-	 */
-	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ixgbe_tx_free_bufs(txq);
-
-	/* Only use descriptors that are available */
-	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
-	if (unlikely(nb_pkts == 0))
-		return 0;
-
-	/* Use exactly nb_pkts descriptors */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
-
-	/*
-	 * At this point, we know there are enough descriptors in the
-	 * ring to transmit all the packets.  This assumes that each
-	 * mbuf contains a single segment, and that no new offloads
-	 * are expected, which would require a new context descriptor.
-	 */
-
-	/*
-	 * See if we're going to wrap-around. If so, handle the top
-	 * of the descriptor ring first, then do the bottom.  If not,
-	 * the processing looks just like the "bottom" part anyway...
-	 */
-	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
-		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
-		ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
-
-		/*
-		 * We know that the last descriptor in the ring will need to
-		 * have its RS bit set because tx_rs_thresh has to be
-		 * a divisor of the ring size
-		 */
-		tx_r[txq->tx_next_rs].read.cmd_type_len |=
-			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
-		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
-		txq->tx_tail = 0;
-	}
-
-	/* Fill H/W descriptor ring with mbuf data */
-	ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
-	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
-
-	/*
-	 * Determine if RS bit should be set
-	 * This is what we actually want:
-	 *   if ((txq->tx_tail - 1) >= txq->tx_next_rs)
-	 * but instead of subtracting 1 and doing >=, we can just do
-	 * greater than without subtracting.
-	 */
-	if (txq->tx_tail > txq->tx_next_rs) {
-		tx_r[txq->tx_next_rs].read.cmd_type_len |=
-			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
-		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
-						txq->tx_rs_thresh);
-		if (txq->tx_next_rs >= txq->nb_tx_desc)
-			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-	}
-
-	/*
-	 * Check for wrap-around. This would only happen if we used
-	 * up to the last descriptor in the ring, no more, no less.
-	 */
-	if (txq->tx_tail >= txq->nb_tx_desc)
-		txq->tx_tail = 0;
-
-	/* update tail pointer */
-	rte_wmb();
-	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
-
-	return nb_pkts;
-}
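
(As a hedged aside, the tail/RS arithmetic above can be summarized by the small standalone sketch below; plain C with hypothetical names, and it assumes, as the comments above require, that rs_thresh divides the ring size. The descriptor writes and the top-of-ring fill are omitted.)

#include <stdint.h>

/* Advance the ring tail by nb used descriptors, wrapping at ring_size,
 * and advance the RS marker (placed every rs_thresh descriptors)
 * whenever the tail moves past it. */
static uint16_t
advance_tail(uint16_t tail, uint16_t nb, uint16_t ring_size,
	     uint16_t rs_thresh, uint16_t *next_rs)
{
	tail = (uint16_t)(tail + nb);
	if (tail > *next_rs) {
		*next_rs = (uint16_t)(*next_rs + rs_thresh);
		if (*next_rs >= ring_size)
			*next_rs = (uint16_t)(rs_thresh - 1);
	}
	if (tail >= ring_size)
		tail = 0;
	return tail;
}
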
-
-uint16_t
-ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
-		       uint16_t nb_pkts)
-{
-	uint16_t nb_tx;
-
-	/* Transmit in bursts of at most TX_MAX_BURST packets */
-	if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
-		return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
-
-	/* transmit more than the max burst, in chunks of TX_MAX_BURST */
-	nb_tx = 0;
-	while (nb_pkts) {
-		uint16_t ret, n;
-		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
-		ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
-		nb_tx = (uint16_t)(nb_tx + ret);
-		nb_pkts = (uint16_t)(nb_pkts - ret);
-		if (ret < n)
-			break;
-	}
-
-	return nb_tx;
-}
-
-static inline void
-ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
-		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
-{
-	uint32_t type_tucmd_mlhl;
-	uint32_t mss_l4len_idx = 0;
-	uint32_t ctx_idx;
-	uint32_t vlan_macip_lens;
-	union ixgbe_tx_offload tx_offload_mask;
-
-	ctx_idx = txq->ctx_curr;
-	tx_offload_mask.data = 0;
-	type_tucmd_mlhl = 0;
-
-	/* Specify which HW CTX to upload. */
-	mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
-
-	if (ol_flags & PKT_TX_VLAN_PKT) {
-		tx_offload_mask.vlan_tci |= ~0;
-	}
-
-	/* check if TCP segmentation is required for this packet */
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		/* implies IP cksum and TCP cksum */
-		type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
-			IXGBE_ADVTXD_TUCMD_L4T_TCP |
-			IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-
-		tx_offload_mask.l2_len |= ~0;
-		tx_offload_mask.l3_len |= ~0;
-		tx_offload_mask.l4_len |= ~0;
-		tx_offload_mask.tso_segsz |= ~0;
-		mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
-		mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
-	} else { /* no TSO, check if hardware checksum is needed */
-		if (ol_flags & PKT_TX_IP_CKSUM) {
-			type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
-			tx_offload_mask.l2_len |= ~0;
-			tx_offload_mask.l3_len |= ~0;
-		}
-
-		switch (ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_UDP_CKSUM:
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
-				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-			mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
-			tx_offload_mask.l2_len |= ~0;
-			tx_offload_mask.l3_len |= ~0;
-			break;
-		case PKT_TX_TCP_CKSUM:
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
-				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-			mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
-			tx_offload_mask.l2_len |= ~0;
-			tx_offload_mask.l3_len |= ~0;
-			tx_offload_mask.l4_len |= ~0;
-			break;
-		case PKT_TX_SCTP_CKSUM:
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
-				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-			mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
-			tx_offload_mask.l2_len |= ~0;
-			tx_offload_mask.l3_len |= ~0;
-			break;
-		default:
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
-				IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
-			break;
-		}
-	}
-
-	txq->ctx_cache[ctx_idx].flags = ol_flags;
-	txq->ctx_cache[ctx_idx].tx_offload.data  =
-		tx_offload_mask.data & tx_offload.data;
-	txq->ctx_cache[ctx_idx].tx_offload_mask    = tx_offload_mask;
-
-	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
-	vlan_macip_lens = tx_offload.l3_len;
-	vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
-	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
-	ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
-	ctx_txd->seqnum_seed     = 0;
-}
-
-/*
- * Check which hardware context can be used. Use the existing match
- * or create a new context descriptor.
- */
-static inline uint32_t
-what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
-		union ixgbe_tx_offload tx_offload)
-{
-	/* If it matches the currently used context */
-	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
-			return txq->ctx_curr;
-	}
-
-	/* Otherwise, check for a match with the other context slot */
-	txq->ctx_curr ^= 1;
-	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
-			return txq->ctx_curr;
-	}
-
-	/* No match in either slot: a new context descriptor must be built */
-	return (IXGBE_CTX_NUM);
-}
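
(The driver caches two hardware TX contexts per queue (IXGBE_CTX_NUM), so the lookup above only ever checks the current slot and its neighbour. A simplified standalone sketch of that policy, with the offload mask handling omitted and hypothetical names:)

#include <stdint.h>

#define CTX_NUM 2

struct ctx_slot {
	uint64_t flags;
	uint64_t offload;	/* already-masked offload fields */
};

/* Return the index of a matching slot, or CTX_NUM when a new context
 * descriptor has to be built in the slot now selected by *cur. */
static uint32_t
ctx_lookup(const struct ctx_slot cache[CTX_NUM], uint32_t *cur,
	   uint64_t flags, uint64_t offload)
{
	if (cache[*cur].flags == flags && cache[*cur].offload == offload)
		return *cur;

	*cur ^= 1;	/* try the other slot */
	if (cache[*cur].flags == flags && cache[*cur].offload == offload)
		return *cur;

	return CTX_NUM;	/* miss: caller builds a new context in *cur */
}
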
-
-static inline uint32_t
-tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
-{
-	uint32_t tmp = 0;
-	if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
-		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
-	if (ol_flags & PKT_TX_IP_CKSUM)
-		tmp |= IXGBE_ADVTXD_POPTS_IXSM;
-	if (ol_flags & PKT_TX_TCP_SEG)
-		tmp |= IXGBE_ADVTXD_POPTS_TXSM;
-	return tmp;
-}
-
-static inline uint32_t
-tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
-{
-	uint32_t cmdtype = 0;
-	if (ol_flags & PKT_TX_VLAN_PKT)
-		cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
-	if (ol_flags & PKT_TX_TCP_SEG)
-		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
-	return cmdtype;
-}
-
-/* Default RS bit threshold values */
-#ifndef DEFAULT_TX_RS_THRESH
-#define DEFAULT_TX_RS_THRESH   32
-#endif
-#ifndef DEFAULT_TX_FREE_THRESH
-#define DEFAULT_TX_FREE_THRESH 32
-#endif
-
-/* Reset transmit descriptors after they have been used */
-static inline int
-ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
-{
-	struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
-	volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
-	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
-	uint16_t nb_tx_desc = txq->nb_tx_desc;
-	uint16_t desc_to_clean_to;
-	uint16_t nb_tx_to_clean;
-
-	/* Determine the last descriptor needing to be cleaned */
-	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
-	if (desc_to_clean_to >= nb_tx_desc)
-		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
-
-	/* Check to make sure the last descriptor to clean is done */
-	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
-	{
-		PMD_TX_FREE_LOG(DEBUG,
-				"TX descriptor %4u is not done"
-				"(port=%d queue=%d)",
-				desc_to_clean_to,
-				txq->port_id, txq->queue_id);
-		/* Failed to clean any descriptors, better luck next time */
-		return -(1);
-	}
-
-	/* Figure out how many descriptors will be cleaned */
-	if (last_desc_cleaned > desc_to_clean_to)
-		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
-							desc_to_clean_to);
-	else
-		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
-						last_desc_cleaned);
-
-	PMD_TX_FREE_LOG(DEBUG,
-			"Cleaning %4u TX descriptors: %4u to %4u "
-			"(port=%d queue=%d)",
-			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
-			txq->port_id, txq->queue_id);
-
-	/*
-	 * The last descriptor to clean is done, so that means all the
-	 * descriptors from the last descriptor that was cleaned
-	 * up to the last descriptor with the RS bit set
-	 * are done. Only reset the threshold descriptor.
-	 */
-	txr[desc_to_clean_to].wb.status = 0;
-
-	/* Update the txq to reflect the last descriptor that was cleaned */
-	txq->last_desc_cleaned = desc_to_clean_to;
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
-
-	/* No Error */
-	return (0);
-}
-
-uint16_t
-ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
-{
-	struct ixgbe_tx_queue *txq;
-	struct ixgbe_tx_entry *sw_ring;
-	struct ixgbe_tx_entry *txe, *txn;
-	volatile union ixgbe_adv_tx_desc *txr;
-	volatile union ixgbe_adv_tx_desc *txd;
-	struct rte_mbuf     *tx_pkt;
-	struct rte_mbuf     *m_seg;
-	uint64_t buf_dma_addr;
-	uint32_t olinfo_status;
-	uint32_t cmd_type_len;
-	uint32_t pkt_len;
-	uint16_t slen;
-	uint64_t ol_flags;
-	uint16_t tx_id;
-	uint16_t tx_last;
-	uint16_t nb_tx;
-	uint16_t nb_used;
-	uint64_t tx_ol_req;
-	uint32_t ctx = 0;
-	uint32_t new_ctx;
-	union ixgbe_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr     = txq->tx_ring;
-	tx_id   = txq->tx_tail;
-	txe = &sw_ring[tx_id];
-
-	/* Determine if the descriptor ring needs to be cleaned. */
-	if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
-		ixgbe_xmit_cleanup(txq);
-	}
-
-	rte_prefetch0(&txe->mbuf->pool);
-
-	/* TX loop */
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		new_ctx = 0;
-		tx_pkt = *tx_pkts++;
-		pkt_len = tx_pkt->pkt_len;
-
-		/*
-		 * Determine how many (if any) context descriptors
-		 * are needed for offload functionality.
-		 */
-		ol_flags = tx_pkt->ol_flags;
-
-		/* If hardware offload required */
-		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
-		if (tx_ol_req) {
-			tx_offload.l2_len = tx_pkt->l2_len;
-			tx_offload.l3_len = tx_pkt->l3_len;
-			tx_offload.l4_len = tx_pkt->l4_len;
-			tx_offload.vlan_tci = tx_pkt->vlan_tci;
-			tx_offload.tso_segsz = tx_pkt->tso_segsz;
-
-			/* Build a new context descriptor if needed, or reuse an existing one. */
-			ctx = what_advctx_update(txq, tx_ol_req,
-				tx_offload);
-			/* Only allocate a context descriptor if required */
-			new_ctx = (ctx == IXGBE_CTX_NUM);
-			ctx = txq->ctx_curr;
-		}
-
-		/*
-		 * Keep track of how many descriptors are used in this loop.
-		 * This will always be the number of segments plus the number
-		 * of context descriptors required to transmit the packet.
-		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
-
-		/*
-		 * The number of descriptors that must be allocated for a
-		 * packet is the number of segments of that packet, plus 1
-		 * Context Descriptor for the hardware offload, if any.
-		 * Determine the last TX descriptor to allocate in the TX ring
-		 * for the packet, starting from the current position (tx_id)
-		 * in the ring.
-		 */
-		tx_last = (uint16_t) (tx_id + nb_used - 1);
-
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
-
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u",
-			   (unsigned) txq->port_id,
-			   (unsigned) txq->queue_id,
-			   (unsigned) pkt_len,
-			   (unsigned) tx_id,
-			   (unsigned) tx_last);
-
-		/*
-		 * Make sure there are enough TX descriptors available to
-		 * transmit the entire packet.
-		 * nb_used better be less than or equal to txq->tx_rs_thresh
-		 */
-		if (nb_used > txq->nb_tx_free) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Not enough free TX descriptors "
-					"nb_used=%4u nb_free=%4u "
-					"(port=%d queue=%d)",
-					nb_used, txq->nb_tx_free,
-					txq->port_id, txq->queue_id);
-
-			if (ixgbe_xmit_cleanup(txq) != 0) {
-				/* Could not clean any descriptors */
-				if (nb_tx == 0)
-					return (0);
-				goto end_of_tx;
-			}
-
-			/* nb_used better be <= txq->tx_rs_thresh */
-			if (unlikely(nb_used > txq->tx_rs_thresh)) {
-				PMD_TX_FREE_LOG(DEBUG,
-					"The number of descriptors needed to "
-					"transmit the packet exceeds the "
-					"RS bit threshold. This will impact "
-					"performance."
-					"nb_used=%4u nb_free=%4u "
-					"tx_rs_thresh=%4u. "
-					"(port=%d queue=%d)",
-					nb_used, txq->nb_tx_free,
-					txq->tx_rs_thresh,
-					txq->port_id, txq->queue_id);
-				/*
-				 * Loop here until there are enough TX
-				 * descriptors or until the ring cannot be
-				 * cleaned.
-				 */
-				while (nb_used > txq->nb_tx_free) {
-					if (ixgbe_xmit_cleanup(txq) != 0) {
-						/*
-						 * Could not clean any
-						 * descriptors
-						 */
-						if (nb_tx == 0)
-							return (0);
-						goto end_of_tx;
-					}
-				}
-			}
-		}
-
-		/*
-		 * By now there are enough free TX descriptors to transmit
-		 * the packet.
-		 */
-
-		/*
-		 * Set common flags of all TX Data Descriptors.
-		 *
-		 * The following bits must be set in all Data Descriptors:
-		 *   - IXGBE_ADVTXD_DTYP_DATA
-		 *   - IXGBE_ADVTXD_DCMD_DEXT
-		 *
-		 * The following bits must be set in the first Data Descriptor
-		 * and are ignored in the other ones:
-		 *   - IXGBE_ADVTXD_DCMD_IFCS
-		 *   - IXGBE_ADVTXD_MAC_1588
-		 *   - IXGBE_ADVTXD_DCMD_VLE
-		 *
-		 * The following bits must only be set in the last Data
-		 * Descriptor:
-		 *   - IXGBE_TXD_CMD_EOP
-		 *
-		 * The following bits can be set in any Data Descriptor, but
-		 * are only set in the last Data Descriptor:
-		 *   - IXGBE_TXD_CMD_RS
-		 */
-		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
-			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
-
-#ifdef RTE_LIBRTE_IEEE1588
-		if (ol_flags & PKT_TX_IEEE1588_TMST)
-			cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
-#endif
-
-		olinfo_status = 0;
-		if (tx_ol_req) {
-
-			if (ol_flags & PKT_TX_TCP_SEG) {
-				/* when TSO is on, the paylen in the descriptor
-				 * is not the packet length but the TCP payload
-				 * length */
-				pkt_len -= (tx_offload.l2_len +
-					tx_offload.l3_len + tx_offload.l4_len);
-			}
-
-			/*
-			 * Setup the TX Advanced Context Descriptor if required
-			 */
-			if (new_ctx) {
-				volatile struct ixgbe_adv_tx_context_desc *
-				    ctx_txd;
-
-				ctx_txd = (volatile struct
-				    ixgbe_adv_tx_context_desc *)
-				    &txr[tx_id];
-
-				txn = &sw_ring[txe->next_id];
-				rte_prefetch0(&txn->mbuf->pool);
-
-				if (txe->mbuf != NULL) {
-					rte_pktmbuf_free_seg(txe->mbuf);
-					txe->mbuf = NULL;
-				}
-
-				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-					tx_offload);
-
-				txe->last_id = tx_last;
-				tx_id = txe->next_id;
-				txe = txn;
-			}
-
-			/*
-			 * Set up the TX Advanced Data Descriptor.
-			 * This path is taken whether a new context descriptor
-			 * was built or an existing one is reused.
-			 */
-			cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
-			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
-			olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
-		}
-
-		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-		m_seg = tx_pkt;
-		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
-			rte_prefetch0(&txn->mbuf->pool);
-
-			if (txe->mbuf != NULL)
-				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/*
-			 * Set up Transmit Data Descriptor.
-			 */
-			slen = m_seg->data_len;
-			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
-			txd->read.buffer_addr =
-				rte_cpu_to_le_64(buf_dma_addr);
-			txd->read.cmd_type_len =
-				rte_cpu_to_le_32(cmd_type_len | slen);
-			txd->read.olinfo_status =
-				rte_cpu_to_le_32(olinfo_status);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg != NULL);
-
-		/*
-		 * The last packet data descriptor needs End Of Packet (EOP)
-		 */
-		cmd_type_len |= IXGBE_TXD_CMD_EOP;
-		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
-		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
-
-		/* Set RS bit only on threshold packets' last descriptor */
-		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Setting RS bit on TXD id="
-					"%4u (port=%d queue=%d)",
-					tx_last, txq->port_id, txq->queue_id);
-
-			cmd_type_len |= IXGBE_TXD_CMD_RS;
-
-			/* Update txq RS bit counters */
-			txq->nb_tx_used = 0;
-		}
-		txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
-	}
-end_of_tx:
-	rte_wmb();
-
-	/*
-	 * Set the Transmit Descriptor Tail (TDT)
-	 */
-	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
-		   (unsigned) tx_id, (unsigned) nb_tx);
-	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
-	txq->tx_tail = tx_id;
-
-	return (nb_tx);
-}
-
-/*********************************************************************
- *
- *  RX functions
- *
- **********************************************************************/
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
-{
-	uint64_t pkt_flags;
-
-	static const uint64_t ip_pkt_types_map[16] = {
-		0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
-		PKT_RX_IPV6_HDR, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-		PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
-	};
-
-	static const uint64_t ip_rss_types_map[16] = {
-		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
-		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
-		PKT_RX_RSS_HASH, 0, 0, 0,
-		0, 0, 0,  PKT_RX_FDIR,
-	};
-
-#ifdef RTE_LIBRTE_IEEE1588
-	static uint64_t ip_pkt_etqf_map[8] = {
-		0, 0, 0, PKT_RX_IEEE1588_PTP,
-		0, 0, 0, 0,
-	};
-
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
-			ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#else
-	pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
-			ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
-#endif
-	return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
-}
-
-static inline uint64_t
-rx_desc_status_to_pkt_flags(uint32_t rx_status)
-{
-	uint64_t pkt_flags;
-
-	/*
-	 * Check only whether a VLAN is present.
-	 * Do not check whether the L3/L4 RX checksum was done by the NIC;
-	 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
-	 */
-	pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
-
-#ifdef RTE_LIBRTE_IEEE1588
-	if (rx_status & IXGBE_RXD_STAT_TMST)
-		pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
-#endif
-	return pkt_flags;
-}
-
-static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
-{
-	/*
-	 * Bit 31: IPE, IPv4 checksum error
-	 * Bit 30: L4I, L4 integrity error
-	 */
-	static uint64_t error_to_pkt_flags_map[4] = {
-		0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
-		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
-	};
-	return error_to_pkt_flags_map[(rx_status >>
-		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
-}
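
(The two checksum-error bits select one of four flag combinations through a table rather than branches. A self-contained sketch of the same idea, using stand-in flag values rather than the real PKT_RX_* constants:)

#include <stdint.h>

#define FLAG_IP_CKSUM_BAD	(1ULL << 0)	/* stand-in, not the mbuf flag */
#define FLAG_L4_CKSUM_BAD	(1ULL << 1)	/* stand-in, not the mbuf flag */

/* Map a 2-bit hardware error field (bit0 = L4 error, bit1 = IP error)
 * to packet flags with a single table load. */
static uint64_t
err_bits_to_flags(uint32_t err2)
{
	static const uint64_t map[4] = {
		0,
		FLAG_L4_CKSUM_BAD,
		FLAG_IP_CKSUM_BAD,
		FLAG_IP_CKSUM_BAD | FLAG_L4_CKSUM_BAD,
	};

	return map[err2 & 0x3];
}
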
-
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-/*
- * LOOK_AHEAD defines how many desc statuses to check beyond the
- * current descriptor.
- * It must be a pound define for optimal performance.
- * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
- * function only works with LOOK_AHEAD=8.
- */
-#define LOOK_AHEAD 8
-#if (LOOK_AHEAD != 8)
-#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
-#endif
-static inline int
-ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
-{
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_entry *rxep;
-	struct rte_mbuf *mb;
-	uint16_t pkt_len;
-	uint64_t pkt_flags;
-	int s[LOOK_AHEAD], nb_dd;
-	int i, j, nb_rx = 0;
-
-
-	/* get references to current descriptor and S/W ring entry */
-	rxdp = &rxq->rx_ring[rxq->rx_tail];
-	rxep = &rxq->sw_ring[rxq->rx_tail];
-
-	/* check to make sure there is at least 1 packet to receive */
-	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
-		return 0;
-
-	/*
-	 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
-	 * reference packets that are ready to be received.
-	 */
-	for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
-	     i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
-	{
-		/* Read desc statuses backwards to avoid race condition */
-		for (j = LOOK_AHEAD-1; j >= 0; --j)
-			s[j] = rxdp[j].wb.upper.status_error;
-
-		/* Compute how many status bits were set */
-		nb_dd = 0;
-		for (j = 0; j < LOOK_AHEAD; ++j)
-			nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
-
-		nb_rx += nb_dd;
-
-		/* Translate descriptor info to mbuf format */
-		for (j = 0; j < nb_dd; ++j) {
-			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
-			mb->data_len = pkt_len;
-			mb->pkt_len = pkt_len;
-			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
-
-			/* convert descriptor fields to rte mbuf flags */
-			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
-					rxdp[j].wb.lower.lo_dword.data);
-			/* reuse status field from scan list */
-			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
-			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
-			mb->ol_flags = pkt_flags;
-
-			if (likely(pkt_flags & PKT_RX_RSS_HASH))
-				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
-			else if (pkt_flags & PKT_RX_FDIR) {
-				mb->hash.fdir.hash =
-					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-						& IXGBE_ATR_HASH_MASK);
-				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
-			}
-		}
-
-		/* Move mbuf pointers from the S/W ring to the stage */
-		for (j = 0; j < LOOK_AHEAD; ++j) {
-			rxq->rx_stage[i + j] = rxep[j].mbuf;
-		}
-
-		/* stop if all requested packets could not be received */
-		if (nb_dd != LOOK_AHEAD)
-			break;
-	}
-
-	/* clear software ring entries so we can cleanup correctly */
-	for (i = 0; i < nb_rx; ++i) {
-		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
-	}
-
-
-	return nb_rx;
-}
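
(A reduced sketch of the look-ahead scan: statuses are checked in fixed groups of LOOK_AHEAD, completed entries are counted, and the scan stops at the first group that is not fully complete. Plain C, hypothetical names; the 'ready' array stands in for the DD bits and is assumed to hold at least max_burst entries, with max_burst a multiple of LOOK_AHEAD.)

#include <stdint.h>

#define LOOK_AHEAD 8

static int
scan_ready(const uint8_t *ready, int max_burst)
{
	int i, j, nb = 0;

	for (i = 0; i < max_burst; i += LOOK_AHEAD) {
		int nb_dd = 0;

		/* count completed entries in this group */
		for (j = 0; j < LOOK_AHEAD; j++)
			nb_dd += ready[i + j] ? 1 : 0;
		nb += nb_dd;

		/* a partially complete group ends the scan */
		if (nb_dd != LOOK_AHEAD)
			break;
	}
	return nb;
}
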
-
-static inline int
-ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
-{
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_entry *rxep;
-	struct rte_mbuf *mb;
-	uint16_t alloc_idx;
-	__le64 dma_addr;
-	int diag, i;
-
-	/* allocate buffers in bulk directly into the S/W ring */
-	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
-	rxep = &rxq->sw_ring[alloc_idx];
-	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
-				    rxq->rx_free_thresh);
-	if (unlikely(diag != 0))
-		return (-ENOMEM);
-
-	rxdp = &rxq->rx_ring[alloc_idx];
-	for (i = 0; i < rxq->rx_free_thresh; ++i) {
-		/* populate the static rte mbuf fields */
-		mb = rxep[i].mbuf;
-		if (reset_mbuf) {
-			mb->next = NULL;
-			mb->nb_segs = 1;
-			mb->port = rxq->port_id;
-		}
-
-		rte_mbuf_refcnt_set(mb, 1);
-		mb->data_off = RTE_PKTMBUF_HEADROOM;
-
-		/* populate the descriptors */
-		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
-		rxdp[i].read.hdr_addr = dma_addr;
-		rxdp[i].read.pkt_addr = dma_addr;
-	}
-
-	/* update state of internal queue structure */
-	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
-	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
-		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
-
-	/* no errors */
-	return 0;
-}
-
-static inline uint16_t
-ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
-{
-	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
-	int i;
-
-	/* how many packets are ready to return? */
-	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
-
-	/* copy mbuf pointers to the application's packet list */
-	for (i = 0; i < nb_pkts; ++i)
-		rx_pkts[i] = stage[i];
-
-	/* update internal queue state */
-	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
-	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
-
-	return nb_pkts;
-}
-
-static inline uint16_t
-rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	     uint16_t nb_pkts)
-{
-	struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
-	uint16_t nb_rx = 0;
-
-	/* Any previously recv'd pkts will be returned from the Rx stage */
-	if (rxq->rx_nb_avail)
-		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
-
-	/* Scan the H/W ring for packets to receive */
-	nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
-
-	/* update internal queue state */
-	rxq->rx_next_avail = 0;
-	rxq->rx_nb_avail = nb_rx;
-	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
-
-	/* if required, allocate new buffers to replenish descriptors */
-	if (rxq->rx_tail > rxq->rx_free_trigger) {
-		uint16_t cur_free_trigger = rxq->rx_free_trigger;
-
-		if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
-			int i, j;
-			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u", (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
-
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-				rxq->rx_free_thresh;
-
-			/*
-			 * Need to rewind any previous receives if we cannot
-			 * allocate new buffers to replenish the old ones.
-			 */
-			rxq->rx_nb_avail = 0;
-			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
-			for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
-				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
-
-			return 0;
-		}
-
-		/* update tail pointer */
-		rte_wmb();
-		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
-	}
-
-	if (rxq->rx_tail >= rxq->nb_rx_desc)
-		rxq->rx_tail = 0;
-
-	/* received any packets this loop? */
-	if (rxq->rx_nb_avail)
-		return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
-
-	return 0;
-}
-
-/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-			   uint16_t nb_pkts)
-{
-	uint16_t nb_rx;
-
-	if (unlikely(nb_pkts == 0))
-		return 0;
-
-	if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
-		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
-
-	/* request is relatively large, chunk it up */
-	nb_rx = 0;
-	while (nb_pkts) {
-		uint16_t ret, n;
-		n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
-		ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
-		nb_rx = (uint16_t)(nb_rx + ret);
-		nb_pkts = (uint16_t)(nb_pkts - ret);
-		if (ret < n)
-			break;
-	}
-
-	return nb_rx;
-}
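
(The chunking pattern above, and its TX counterpart ixgbe_xmit_pkts_simple() earlier, is generic: keep calling the bounded-burst function until the request is exhausted or it returns fewer packets than asked for. A standalone sketch, with do_burst() standing in for rx_recv_pkts()/tx_xmit_pkts():)

#include <stdint.h>

#define MAX_BURST 32

static uint16_t
chunked_call(uint16_t (*do_burst)(uint16_t), uint16_t total)
{
	uint16_t done = 0;

	while (total) {
		uint16_t n = (uint16_t)(total < MAX_BURST ? total : MAX_BURST);
		uint16_t ret = do_burst(n);

		done = (uint16_t)(done + ret);
		total = (uint16_t)(total - ret);
		if (ret < n)	/* ring ran dry: stop early */
			break;
	}
	return done;
}
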
-
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
-	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
-	return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
-		    __rte_unused bool reset_mbuf)
-{
-	return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
-uint16_t
-ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
-{
-	struct ixgbe_rx_queue *rxq;
-	volatile union ixgbe_adv_rx_desc *rx_ring;
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_entry *sw_ring;
-	struct ixgbe_rx_entry *rxe;
-	struct rte_mbuf *rxm;
-	struct rte_mbuf *nmb;
-	union ixgbe_adv_rx_desc rxd;
-	uint64_t dma_addr;
-	uint32_t staterr;
-	uint32_t hlen_type_rss;
-	uint16_t pkt_len;
-	uint16_t rx_id;
-	uint16_t nb_rx;
-	uint16_t nb_hold;
-	uint64_t pkt_flags;
-
-	nb_rx = 0;
-	nb_hold = 0;
-	rxq = rx_queue;
-	rx_id = rxq->rx_tail;
-	rx_ring = rxq->rx_ring;
-	sw_ring = rxq->sw_ring;
-	while (nb_rx < nb_pkts) {
-		/*
-		 * The order of operations here is important as the DD status
-		 * bit must not be read after any other descriptor fields.
-		 * rx_ring and rxdp are pointing to volatile data so the order
-		 * of accesses cannot be reordered by the compiler. If they were
-		 * not volatile, they could be reordered which could lead to
-		 * using invalid descriptor fields when read from rxd.
-		 */
-		rxdp = &rx_ring[rx_id];
-		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
-			break;
-		rxd = *rxdp;
-
-		/*
-		 * End of packet.
-		 *
-		 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
-		 * is likely to be invalid and to be dropped by the various
-		 * validation checks performed by the network stack.
-		 *
-		 * Allocate a new mbuf to replenish the RX ring descriptor.
-		 * If the allocation fails:
-		 *    - arrange for that RX descriptor to be the first one
-		 *      being parsed the next time the receive function is
-		 *      invoked [on the same queue].
-		 *
-		 *    - Stop parsing the RX ring and return immediately.
-		 *
-		 * This policy does not drop the packet received in the RX
-		 * descriptor for which the allocation of a new mbuf failed.
-		 * Thus, it allows that packet to be retrieved later if
-		 * mbufs have been freed in the meantime.
-		 * As a side effect, holding RX descriptors instead of
-		 * systematically giving them back to the NIC may lead to
-		 * RX ring exhaustion situations.
-		 * However, the NIC can gracefully prevent such situations
-		 * from happening by sending specific "back-pressure" flow
-		 * control frames to its peer(s).
-		 */
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
-			   "ext_err_stat=0x%08x pkt_len=%u",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) staterr,
-			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
-
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
-		if (nmb == NULL) {
-			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u", (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
-			break;
-		}
-
-		nb_hold++;
-		rxe = &sw_ring[rx_id];
-		rx_id++;
-		if (rx_id == rxq->nb_rx_desc)
-			rx_id = 0;
-
-		/* Prefetch next mbuf while processing current one. */
-		rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
-
-		/*
-		 * When next RX descriptor is on a cache-line boundary,
-		 * prefetch the next 4 RX descriptors and the next 8 pointers
-		 * to mbufs.
-		 */
-		if ((rx_id & 0x3) == 0) {
-			rte_ixgbe_prefetch(&rx_ring[rx_id]);
-			rte_ixgbe_prefetch(&sw_ring[rx_id]);
-		}
-
-		rxm = rxe->mbuf;
-		rxe->mbuf = nmb;
-		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-		rxdp->read.hdr_addr = dma_addr;
-		rxdp->read.pkt_addr = dma_addr;
-
-		/*
-		 * Initialize the returned mbuf.
-		 * 1) setup generic mbuf fields:
-		 *    - number of segments,
-		 *    - next segment,
-		 *    - packet length,
-		 *    - RX port identifier.
-		 * 2) integrate hardware offload data, if any:
-		 *    - RSS flag & hash,
-		 *    - IP checksum flag,
-		 *    - VLAN TCI, if any,
-		 *    - error flags.
-		 */
-		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
-				      rxq->crc_len);
-		rxm->data_off = RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
-		rxm->pkt_len = pkt_len;
-		rxm->data_len = pkt_len;
-		rxm->port = rxq->port_id;
-
-		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
-		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-
-		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
-		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
-		rxm->ol_flags = pkt_flags;
-
-		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
-		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					   & IXGBE_ATR_HASH_MASK);
-			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
-		}
-		/*
-		 * Store the mbuf address into the next entry of the array
-		 * of returned packets.
-		 */
-		rx_pkts[nb_rx++] = rxm;
-	}
-	rxq->rx_tail = rx_id;
-
-	/*
-	 * If the number of free RX descriptors is greater than the RX free
-	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-	 * register.
-	 * Update the RDT with the value of the last processed RX descriptor
-	 * minus 1, to guarantee that the RDT register is never equal to the
-	 * RDH register, which creates a "full" ring situation from the
-	 * hardware point of view...
-	 */
-	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
-	if (nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) nb_hold,
-			   (unsigned) nb_rx);
-		rx_id = (uint16_t) ((rx_id == 0) ?
-				     (rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
-		nb_hold = 0;
-	}
-	rxq->nb_rx_hold = nb_hold;
-	return (nb_rx);
-}
-
-/**
- * Detect an RSC descriptor.
- */
-static inline uint32_t
-ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
-{
-	return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
-		IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
-}
-
-/**
- * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
- *
- * Fill the following info in the HEAD buffer of the Rx cluster:
- *    - RX port identifier
- *    - hardware offload data, if any:
- *      - RSS flag & hash
- *      - IP checksum flag
- *      - VLAN TCI, if any
- *      - error flags
- * @head HEAD of the packet cluster
- * @desc HW descriptor to get data from
- * @port_id Port ID of the Rx queue
- */
-static inline void
-ixgbe_fill_cluster_head_buf(
-	struct rte_mbuf *head,
-	union ixgbe_adv_rx_desc *desc,
-	uint8_t port_id,
-	uint32_t staterr)
-{
-	uint32_t hlen_type_rss;
-	uint64_t pkt_flags;
-
-	head->port = port_id;
-
-	/*
-	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
-	 * set in the pkt_flags field.
-	 */
-	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
-	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
-	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
-	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
-	head->ol_flags = pkt_flags;
-
-	if (likely(pkt_flags & PKT_RX_RSS_HASH))
-		head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
-	else if (pkt_flags & PKT_RX_FDIR) {
-		head->hash.fdir.hash =
-			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
-							  & IXGBE_ATR_HASH_MASK;
-		head->hash.fdir.id =
-			rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
-	}
-}
-
-/**
- * ixgbe_recv_pkts_lro - receive handler for the LRO case.
- *
- * @rx_queue Rx queue handle
- * @rx_pkts table of received packets
- * @nb_pkts size of rx_pkts table
- * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
- *
- * Handles the Rx HW ring completions when RSC feature is configured. Uses an
- * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
- *
- * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
- * 1) When non-EOP RSC completion arrives:
- *    a) Update the HEAD of the current RSC aggregation cluster with the new
- *       segment's data length.
- *    b) Set the "next" pointer of the current segment to point to the segment
- *       at the NEXTP index.
- *    c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
- *       in the sw_sc_ring.
- * 2) When EOP arrives we just update the cluster's total length and offload
- *    flags and deliver the cluster up to the upper layers. In our case - put it
- *    in the rx_pkts table.
- *
- * Returns the number of received packets/clusters (according to the "bulk
- * receive" interface).
- */
-static inline uint16_t
-ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
-		    bool bulk_alloc)
-{
-	struct ixgbe_rx_queue *rxq = rx_queue;
-	volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
-	struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
-	struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
-	uint16_t rx_id = rxq->rx_tail;
-	uint16_t nb_rx = 0;
-	uint16_t nb_hold = rxq->nb_rx_hold;
-	uint16_t prev_id = rxq->rx_tail;
-
-	while (nb_rx < nb_pkts) {
-		bool eop;
-		struct ixgbe_rx_entry *rxe;
-		struct ixgbe_scattered_rx_entry *sc_entry;
-		struct ixgbe_scattered_rx_entry *next_sc_entry;
-		struct ixgbe_rx_entry *next_rxe;
-		struct rte_mbuf *first_seg;
-		struct rte_mbuf *rxm;
-		struct rte_mbuf *nmb;
-		union ixgbe_adv_rx_desc rxd;
-		uint16_t data_len;
-		uint16_t next_id;
-		volatile union ixgbe_adv_rx_desc *rxdp;
-		uint32_t staterr;
-
-next_desc:
-		/*
-		 * The code in this whole file uses the volatile pointer to
-		 * ensure the read ordering of the status and the rest of the
-		 * descriptor fields (on the compiler level only!!!). This is so
-		 * UGLY - why not to just use the compiler barrier instead? DPDK
-		 * even has the rte_compiler_barrier() for that.
-		 *
-		 * But most importantly this is just wrong because this doesn't
-		 * ensure memory ordering in a general case at all. For
-		 * instance, DPDK is supposed to work on Power CPUs where
-		 * compiler barrier may just not be enough!
-		 *
-		 * I tried to write only this function properly to have a
-		 * starting point (as a part of an LRO/RSC series) but the
-		 * compiler cursed at me when I tried to cast away the
-		 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
-		 * keeping it the way it is for now.
-		 *
-		 * The code in this file is broken in so many other places and
-		 * will just not work on a big endian CPU anyway therefore the
-		 * lines below will have to be revisited together with the rest
-		 * of the ixgbe PMD.
-		 *
-		 * TODO:
-		 *    - Get rid of "volatile" crap and let the compiler do its
-		 *      job.
-		 *    - Use the proper memory barrier (rte_rmb()) to ensure the
-		 *      memory ordering below.
-		 */
-		rxdp = &rx_ring[rx_id];
-		staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
-
-		if (!(staterr & IXGBE_RXDADV_STAT_DD))
-			break;
-
-		rxd = *rxdp;
-
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
-				  "staterr=0x%x data_len=%u",
-			   rxq->port_id, rxq->queue_id, rx_id, staterr,
-			   rte_le_to_cpu_16(rxd.wb.upper.length));
-
-		if (!bulk_alloc) {
-			nmb = rte_rxmbuf_alloc(rxq->mb_pool);
-			if (nmb == NULL) {
-				PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
-						  "port_id=%u queue_id=%u",
-					   rxq->port_id, rxq->queue_id);
-
-				rte_eth_devices[rxq->port_id].data->
-							rx_mbuf_alloc_failed++;
-				break;
-			}
-		} else if (nb_hold > rxq->rx_free_thresh) {
-			uint16_t next_rdt = rxq->rx_free_trigger;
-
-			if (!ixgbe_rx_alloc_bufs(rxq, false)) {
-				rte_wmb();
-				IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
-						    next_rdt);
-				nb_hold -= rxq->rx_free_thresh;
-			} else {
-				PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
-						  "port_id=%u queue_id=%u",
-					   rxq->port_id, rxq->queue_id);
-
-				rte_eth_devices[rxq->port_id].data->
-							rx_mbuf_alloc_failed++;
-				break;
-			}
-		}
-
-		nb_hold++;
-		rxe = &sw_ring[rx_id];
-		eop = staterr & IXGBE_RXDADV_STAT_EOP;
-
-		next_id = rx_id + 1;
-		if (next_id == rxq->nb_rx_desc)
-			next_id = 0;
-
-		/* Prefetch next mbuf while processing current one. */
-		rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
-
-		/*
-		 * When next RX descriptor is on a cache-line boundary,
-		 * prefetch the next 4 RX descriptors and the next 4 pointers
-		 * to mbufs.
-		 */
-		if ((next_id & 0x3) == 0) {
-			rte_ixgbe_prefetch(&rx_ring[next_id]);
-			rte_ixgbe_prefetch(&sw_ring[next_id]);
-		}
-
-		rxm = rxe->mbuf;
-
-		if (!bulk_alloc) {
-			__le64 dma =
-			  rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
-			/*
-			 * Update RX descriptor with the physical address of the
-			 * new data buffer of the newly allocated mbuf.
-			 */
-			rxe->mbuf = nmb;
-
-			rxm->data_off = RTE_PKTMBUF_HEADROOM;
-			rxdp->read.hdr_addr = dma;
-			rxdp->read.pkt_addr = dma;
-		} else
-			rxe->mbuf = NULL;
-
-		/*
-		 * Set data length & data buffer address of mbuf.
-		 */
-		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-		rxm->data_len = data_len;
-
-		if (!eop) {
-			uint16_t nextp_id;
-			/*
-			 * Get next descriptor index:
-			 *  - For RSC it's in the NEXTP field.
-			 *  - For a scattered packet - it's just a following
-			 *    descriptor.
-			 */
-			if (ixgbe_rsc_count(&rxd))
-				nextp_id =
-					(staterr & IXGBE_RXDADV_NEXTP_MASK) >>
-						       IXGBE_RXDADV_NEXTP_SHIFT;
-			else
-				nextp_id = next_id;
-
-			next_sc_entry = &sw_sc_ring[nextp_id];
-			next_rxe = &sw_ring[nextp_id];
-			rte_ixgbe_prefetch(next_rxe);
-		}
-
-		sc_entry = &sw_sc_ring[rx_id];
-		first_seg = sc_entry->fbuf;
-		sc_entry->fbuf = NULL;
-
-		/*
-		 * If this is the first buffer of the received packet,
-		 * set the pointer to the first mbuf of the packet and
-		 * initialize its context.
-		 * Otherwise, update the total length and the number of segments
-		 * of the current scattered packet, and update the pointer to
-		 * the last mbuf of the current packet.
-		 */
-		if (first_seg == NULL) {
-			first_seg = rxm;
-			first_seg->pkt_len = data_len;
-			first_seg->nb_segs = 1;
-		} else {
-			first_seg->pkt_len += data_len;
-			first_seg->nb_segs++;
-		}
-
-		prev_id = rx_id;
-		rx_id = next_id;
-
-		/*
-		 * If this is not the last buffer of the received packet, update
-		 * the pointer to the first mbuf at the NEXTP entry in the
-		 * sw_sc_ring and continue to parse the RX ring.
-		 */
-		if (!eop) {
-			rxm->next = next_rxe->mbuf;
-			next_sc_entry->fbuf = first_seg;
-			goto next_desc;
-		}
-
-		/*
-		 * This is the last buffer of the received packet - return
-		 * the current cluster to the user.
-		 */
-		rxm->next = NULL;
-
-		/* Initialize the first mbuf of the returned packet */
-		ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
-					    staterr);
-
-		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch((char *)first_seg->buf_addr +
-			first_seg->data_off);
-
-		/*
-		 * Store the mbuf address into the next entry of the array
-		 * of returned packets.
-		 */
-		rx_pkts[nb_rx++] = first_seg;
-	}
-
-	/*
-	 * Record index of the next RX descriptor to probe.
-	 */
-	rxq->rx_tail = rx_id;
-
-	/*
-	 * If the number of free RX descriptors is greater than the RX free
-	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-	 * register.
-	 * Update the RDT with the value of the last processed RX descriptor
-	 * minus 1, to guarantee that the RDT register is never equal to the
-	 * RDH register, which creates a "full" ring situation from the
-	 * hardware point of view...
-	 */
-	if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u",
-			   rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
-
-		rte_wmb();
-		IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
-		nb_hold = 0;
-	}
-
-	rxq->nb_rx_hold = nb_hold;
-	return nb_rx;
-}
-
-uint16_t
-ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-				 uint16_t nb_pkts)
-{
-	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
-}
-
-uint16_t
-ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-			       uint16_t nb_pkts)
-{
-	return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
-}
-
-/*********************************************************************
- *
- *  Queue management functions
- *
- **********************************************************************/
-
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on a 16 byte boundary, but TDLEN/RDLEN should be
- * a multiple of 128 bytes. So we align TDBA/RDBA on a 128 byte boundary, which
- * also optimizes for cache line size. H/W supports a cache line size of up to 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
- * Create memzone for HW rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-		      uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-			dev->driver->pci_drv.name, ring_name,
-			dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_memzone_reserve_bounded(z_name, ring_size,
-		socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
-#else
-	return rte_memzone_reserve_aligned(z_name, ring_size,
-		socket_id, 0, IXGBE_ALIGN);
-#endif
-}
-
-static void
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
-{
-	unsigned i;
-
-	if (txq->sw_ring != NULL) {
-		for (i = 0; i < txq->nb_tx_desc; i++) {
-			if (txq->sw_ring[i].mbuf != NULL) {
-				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-				txq->sw_ring[i].mbuf = NULL;
-			}
-		}
-	}
-}
-
-static void
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
-{
-	if (txq != NULL &&
-	    txq->sw_ring != NULL)
-		rte_free(txq->sw_ring);
-}
-
-static void
-ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
-{
-	if (txq != NULL && txq->ops != NULL) {
-		txq->ops->release_mbufs(txq);
-		txq->ops->free_swring(txq);
-		rte_free(txq);
-	}
-}
-
-void
-ixgbe_dev_tx_queue_release(void *txq)
-{
-	ixgbe_tx_queue_release(txq);
-}
-
-/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
-{
-	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
-	struct ixgbe_tx_entry *txe = txq->sw_ring;
-	uint16_t prev, i;
-
-	/* Zero out HW ring memory */
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		txq->tx_ring[i] = zeroed_desc;
-	}
-
-	/* Initialize SW ring entries */
-	prev = (uint16_t) (txq->nb_tx_desc - 1);
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
-		txe[i].mbuf = NULL;
-		txe[i].last_id = i;
-		txe[prev].next_id = i;
-		prev = i;
-	}
-
-	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	txq->tx_tail = 0;
-	txq->nb_tx_used = 0;
-	/*
-	 * Always allow 1 descriptor to be un-allocated to avoid
-	 * a H/W race condition
-	 */
-	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
-	txq->ctx_curr = 0;
-	memset((void*)&txq->ctx_cache, 0,
-		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
-}
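
(The reset above links each software-ring entry to its successor and marks every descriptor as already completed, so the first transmit pass can reuse them immediately. A minimal sketch of just the circular linking, with a simplified entry type:)

#include <stdint.h>

struct sw_entry {
	uint16_t next_id;
	uint16_t last_id;
};

/* Link ring[prev].next_id to i (wrapping from the last entry back to 0)
 * and set last_id = i, mirroring the initialization loop above. */
static void
init_ring_links(struct sw_entry *ring, uint16_t n)
{
	uint16_t prev = (uint16_t)(n - 1);
	uint16_t i;

	for (i = 0; i < n; i++) {
		ring[i].last_id = i;
		ring[prev].next_id = i;
		prev = i;
	}
}
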
-
-static const struct ixgbe_txq_ops def_txq_ops = {
-	.release_mbufs = ixgbe_tx_queue_release_mbufs,
-	.free_swring = ixgbe_tx_free_swring,
-	.reset = ixgbe_reset_tx_queue,
-};
-
-/* Takes an ethdev and a queue and sets up the tx function to be used based on
- * the queue parameters. Used in tx_queue_setup by primary process and then
- * in dev_init by secondary process when attaching to an existing ethdev.
- */
-void
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
-{
-	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
-			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path");
-#ifdef RTE_IXGBE_INC_VECTOR
-		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
-				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
-					ixgbe_txq_vec_setup(txq) == 0)) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.");
-			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
-		} else
-#endif
-		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
-	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
-		PMD_INIT_LOG(INFO,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO,
-				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
-				(unsigned long)txq->tx_rs_thresh,
-				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
-		dev->tx_pkt_burst = ixgbe_xmit_pkts;
-	}
-}
-
-int
-ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
-			 uint16_t queue_idx,
-			 uint16_t nb_desc,
-			 unsigned int socket_id,
-			 const struct rte_eth_txconf *tx_conf)
-{
-	const struct rte_memzone *tz;
-	struct ixgbe_tx_queue *txq;
-	struct ixgbe_hw     *hw;
-	uint16_t tx_rs_thresh, tx_free_thresh;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/*
-	 * Validate number of transmit descriptors.
-	 * It must not exceed the hardware maximum, and must be a multiple
-	 * of IXGBE_ALIGN.
-	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
-		return -EINVAL;
-	}
-
-	/*
-	 * The following two parameters control the setting of the RS bit on
-	 * transmit descriptors.
-	 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
-	 * descriptors have been used.
-	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
-	 * descriptors are used or if the number of descriptors required
-	 * to transmit a packet is greater than the number of free TX
-	 * descriptors.
-	 * The following constraints must be satisfied:
-	 *  tx_rs_thresh must be greater than 0.
-	 *  tx_rs_thresh must be less than the size of the ring minus 2.
-	 *  tx_rs_thresh must be less than or equal to tx_free_thresh.
-	 *  tx_rs_thresh must be a divisor of the ring size.
-	 *  tx_free_thresh must be greater than 0.
-	 *  tx_free_thresh must be less than the size of the ring minus 3.
-	 * One descriptor in the TX ring is used as a sentinel to avoid a
-	 * H/W race condition, hence the maximum threshold constraints.
-	 * When set to zero use default values.
-	 */
-	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
-			tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
-	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
-			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
-	if (tx_rs_thresh >= (nb_desc - 2)) {
-		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
-			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
-			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-			     (int)dev->data->port_id, (int)queue_idx);
-		return -(EINVAL);
-	}
-	if (tx_free_thresh >= (nb_desc - 3)) {
-		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
-			     "tx_free_thresh must be less than the number of "
-			     "TX descriptors minus 3. (tx_free_thresh=%u "
-			     "port=%d queue=%d)",
-			     (unsigned int)tx_free_thresh,
-			     (int)dev->data->port_id, (int)queue_idx);
-		return -(EINVAL);
-	}
-	if (tx_rs_thresh > tx_free_thresh) {
-		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
-			     "tx_free_thresh. (tx_free_thresh=%u "
-			     "tx_rs_thresh=%u port=%d queue=%d)",
-			     (unsigned int)tx_free_thresh,
-			     (unsigned int)tx_rs_thresh,
-			     (int)dev->data->port_id,
-			     (int)queue_idx);
-		return -(EINVAL);
-	}
-	if ((nb_desc % tx_rs_thresh) != 0) {
-		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
-			     "number of TX descriptors. (tx_rs_thresh=%u "
-			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-			     (int)dev->data->port_id, (int)queue_idx);
-		return -(EINVAL);
-	}
-
-	/*
-	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
-	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
-	 * by the NIC and all descriptors are written back after the NIC
-	 * accumulates WTHRESH descriptors.
-	 */
-	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
-		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
-			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-			     (int)dev->data->port_id, (int)queue_idx);
-		return -(EINVAL);
-	}
-
-	/* Free memory prior to re-allocation if needed... */
-	if (dev->data->tx_queues[queue_idx] != NULL) {
-		ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
-		dev->data->tx_queues[queue_idx] = NULL;
-	}
-
-	/* First allocate the tx queue data structure */
-	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
-	if (txq == NULL)
-		return (-ENOMEM);
-
-	/*
-	 * Allocate TX ring hardware descriptors. A memzone large enough to
-	 * handle the maximum ring size is allocated in order to allow for
-	 * resizing in later calls to the queue setup function.
-	 */
-	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
-			sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
-			socket_id);
-	if (tz == NULL) {
-		ixgbe_tx_queue_release(txq);
-		return (-ENOMEM);
-	}
-
-	txq->nb_tx_desc = nb_desc;
-	txq->tx_rs_thresh = tx_rs_thresh;
-	txq->tx_free_thresh = tx_free_thresh;
-	txq->pthresh = tx_conf->tx_thresh.pthresh;
-	txq->hthresh = tx_conf->tx_thresh.hthresh;
-	txq->wthresh = tx_conf->tx_thresh.wthresh;
-	txq->queue_id = queue_idx;
-	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-	txq->port_id = dev->data->port_id;
-	txq->txq_flags = tx_conf->txq_flags;
-	txq->ops = &def_txq_ops;
-	txq->tx_deferred_start = tx_conf->tx_deferred_start;
-
-	/*
-	 * Use the VF tail register (VFTDT) when running as a virtual function
-	 */
-	if (hw->mac.type == ixgbe_mac_82599_vf ||
-	    hw->mac.type == ixgbe_mac_X540_vf ||
-	    hw->mac.type == ixgbe_mac_X550_vf ||
-	    hw->mac.type == ixgbe_mac_X550EM_x_vf)
-		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
-	else
-		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
-#ifndef	RTE_LIBRTE_XEN_DOM0
-	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
-	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
-	txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
-
-	/* Allocate software ring */
-	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
-				sizeof(struct ixgbe_tx_entry) * nb_desc,
-				RTE_CACHE_LINE_SIZE, socket_id);
-	if (txq->sw_ring == NULL) {
-		ixgbe_tx_queue_release(txq);
-		return (-ENOMEM);
-	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
-		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
-
-	/* set up vector or scalar TX function as appropriate */
-	ixgbe_set_tx_function(dev, txq);
-
-	txq->ops->reset(txq);
-
-	dev->data->tx_queues[queue_idx] = txq;
-
-	return (0);
-}
-
-/**
- * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
- *
- * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
- * in the sw_rsc_ring is not set to NULL but rather points to the next
- * mbuf of this RSC aggregation (that has not been completed yet and still
- * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
- * will just free first "nb_segs" segments of the cluster explicitly by calling
- * an rte_pktmbuf_free_seg().
- *
- * @m scattered cluster head
- */
-static void
-ixgbe_free_sc_cluster(struct rte_mbuf *m)
-{
-	uint8_t i, nb_segs = m->nb_segs;
-	struct rte_mbuf *next_seg;
-
-	for (i = 0; i < nb_segs; i++) {
-		next_seg = m->next;
-		rte_pktmbuf_free_seg(m);
-		m = next_seg;
-	}
-}
-
-static void
-ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
-{
-	unsigned i;
-
-	if (rxq->sw_ring != NULL) {
-		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_ring[i].mbuf != NULL) {
-				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
-				rxq->sw_ring[i].mbuf = NULL;
-			}
-		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-		if (rxq->rx_nb_avail) {
-			for (i = 0; i < rxq->rx_nb_avail; ++i) {
-				struct rte_mbuf *mb;
-				mb = rxq->rx_stage[rxq->rx_next_avail + i];
-				rte_pktmbuf_free_seg(mb);
-			}
-			rxq->rx_nb_avail = 0;
-		}
-#endif
-	}
-
-	if (rxq->sw_sc_ring)
-		for (i = 0; i < rxq->nb_rx_desc; i++)
-			if (rxq->sw_sc_ring[i].fbuf) {
-				ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
-				rxq->sw_sc_ring[i].fbuf = NULL;
-			}
-}
-
-static void
-ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
-{
-	if (rxq != NULL) {
-		ixgbe_rx_queue_release_mbufs(rxq);
-		rte_free(rxq->sw_ring);
-		rte_free(rxq->sw_sc_ring);
-		rte_free(rxq);
-	}
-}
-
-void
-ixgbe_dev_rx_queue_release(void *rxq)
-{
-	ixgbe_rx_queue_release(rxq);
-}
-
-/*
- * Check if Rx Burst Bulk Alloc function can be used.
- * Return
- *        0: the preconditions are satisfied and the bulk allocation function
- *           can be used.
- *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
- *           function must be used.
- */
-static inline int
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
-{
-	int ret = 0;
-
-	/*
-	 * Make sure the following pre-conditions are satisfied:
-	 *   rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
-	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
-	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
-	 *   rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
-	 * Scattered packets are not supported.  This should be checked
-	 * outside of this function.
-	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-			     "rxq->rx_free_thresh=%d, "
-			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
-			     rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
-		ret = -EINVAL;
-	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-			     "rxq->rx_free_thresh=%d, "
-			     "rxq->nb_rx_desc=%d",
-			     rxq->rx_free_thresh, rxq->nb_rx_desc);
-		ret = -EINVAL;
-	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-			     "rxq->nb_rx_desc=%d, "
-			     "rxq->rx_free_thresh=%d",
-			     rxq->nb_rx_desc, rxq->rx_free_thresh);
-		ret = -EINVAL;
-	} else if (!(rxq->nb_rx_desc <
-	       (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-			     "rxq->nb_rx_desc=%d, "
-			     "IXGBE_MAX_RING_DESC=%d, "
-			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
-			     rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
-			     RTE_PMD_IXGBE_RX_MAX_BURST);
-		ret = -EINVAL;
-	}
-#else
-	ret = -EINVAL;
-#endif
-
-	return ret;
-}
-
-/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void
-ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
-{
-	static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
-	unsigned i;
-	uint16_t len = rxq->nb_rx_desc;
-
-	/*
-	 * By default, the Rx queue setup function allocates enough memory for
-	 * IXGBE_MAX_RING_DESC.  The Rx Burst bulk allocation function requires
-	 * extra memory at the end of the descriptor ring to be zero'd out. A
-	 * pre-condition for using the Rx burst bulk alloc function is that the
-	 * number of descriptors is less than
-	 * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
-	 * constraints here to see if we need to zero out memory after the end
-	 * of the H/W descriptor ring.
-	 */
-	if (adapter->rx_bulk_alloc_allowed)
-		/* zero out extra memory */
-		len += RTE_PMD_IXGBE_RX_MAX_BURST;
-
-	/*
-	 * Zero out HW ring memory. Zero out extra memory at the end of
-	 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
-	 * reads extra memory as zeros.
-	 */
-	for (i = 0; i < len; i++) {
-		rxq->rx_ring[i] = zeroed_desc;
-	}
-
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	/*
-	 * initialize extra software ring entries. Space for these extra
-	 * entries is always allocated
-	 */
-	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
-	for (i = rxq->nb_rx_desc; i < len; ++i) {
-		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
-	}
-
-	rxq->rx_nb_avail = 0;
-	rxq->rx_next_avail = 0;
-	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-	rxq->rx_tail = 0;
-	rxq->nb_rx_hold = 0;
-	rxq->pkt_first_seg = NULL;
-	rxq->pkt_last_seg = NULL;
-}
-
-int
-ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
-			 uint16_t queue_idx,
-			 uint16_t nb_desc,
-			 unsigned int socket_id,
-			 const struct rte_eth_rxconf *rx_conf,
-			 struct rte_mempool *mp)
-{
-	const struct rte_memzone *rz;
-	struct ixgbe_rx_queue *rxq;
-	struct ixgbe_hw     *hw;
-	uint16_t len;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/*
-	 * Validate number of receive descriptors.
-	 * It must not exceed hardware maximum, and must be multiple
-	 * of IXGBE_ALIGN.
-	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
-		return (-EINVAL);
-	}
-
-	/* Free memory prior to re-allocation if needed... */
-	if (dev->data->rx_queues[queue_idx] != NULL) {
-		ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
-		dev->data->rx_queues[queue_idx] = NULL;
-	}
-
-	/* First allocate the rx queue data structure */
-	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
-				 RTE_CACHE_LINE_SIZE, socket_id);
-	if (rxq == NULL)
-		return (-ENOMEM);
-	rxq->mb_pool = mp;
-	rxq->nb_rx_desc = nb_desc;
-	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
-	rxq->queue_id = queue_idx;
-	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
-	rxq->drop_en = rx_conf->rx_drop_en;
-	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-
-	/*
-	 * Allocate RX ring hardware descriptors. A memzone large enough to
-	 * handle the maximum ring size is allocated in order to allow for
-	 * resizing in later calls to the queue setup function.
-	 */
-	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
-				   RX_RING_SZ, socket_id);
-	if (rz == NULL) {
-		ixgbe_rx_queue_release(rxq);
-		return (-ENOMEM);
-	}
-
-	/*
-	 * Zero init all the descriptors in the ring.
-	 */
-	memset(rz->addr, 0, RX_RING_SZ);
-
-	/*
-	 * Use the VF ring registers (VFRDT/VFRDH) on a virtual function
-	 */
-	if (hw->mac.type == ixgbe_mac_82599_vf ||
-	    hw->mac.type == ixgbe_mac_X540_vf ||
-	    hw->mac.type == ixgbe_mac_X550_vf ||
-	    hw->mac.type == ixgbe_mac_X550EM_x_vf) {
-		rxq->rdt_reg_addr =
-			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
-		rxq->rdh_reg_addr =
-			IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
-	}
-	else {
-		rxq->rdt_reg_addr =
-			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
-		rxq->rdh_reg_addr =
-			IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
-	}
-#ifndef RTE_LIBRTE_XEN_DOM0
-	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
-	rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
-
-	/*
-	 * Certain constraints must be met in order to use the bulk buffer
-	 * allocation Rx burst function. If any of the Rx queues does not meet
-	 * them, the feature should be disabled for the whole port.
-	 */
-	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
-		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
-				    "preconditions - canceling the feature for "
-				    "the whole port[%d]",
-			     rxq->queue_id, rxq->port_id);
-		adapter->rx_bulk_alloc_allowed = false;
-	}
-
-	/*
-	 * Allocate software ring. Allow for space at the end of the
-	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
-	 * function does not access an invalid memory region.
-	 */
-	len = nb_desc;
-	if (adapter->rx_bulk_alloc_allowed)
-		len += RTE_PMD_IXGBE_RX_MAX_BURST;
-
-	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
-					  sizeof(struct ixgbe_rx_entry) * len,
-					  RTE_CACHE_LINE_SIZE, socket_id);
-	if (!rxq->sw_ring) {
-		ixgbe_rx_queue_release(rxq);
-		return (-ENOMEM);
-	}
-
-	/*
-	 * Always allocate even if it's not going to be needed in order to
-	 * simplify the code.
-	 *
-	 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
-	 * be requested in ixgbe_dev_rx_init(), which is called later from
-	 * dev_start() flow.
-	 */
-	rxq->sw_sc_ring =
-		rte_zmalloc_socket("rxq->sw_sc_ring",
-				   sizeof(struct ixgbe_scattered_rx_entry) * len,
-				   RTE_CACHE_LINE_SIZE, socket_id);
-	if (!rxq->sw_sc_ring) {
-		ixgbe_rx_queue_release(rxq);
-		return (-ENOMEM);
-	}
-
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
-			    "dma_addr=0x%"PRIx64,
-		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
-		     rxq->rx_ring_phys_addr);
-
-	if (!rte_is_power_of_2(nb_desc)) {
-		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
-				    "preconditions - canceling the feature for "
-				    "the whole port[%d]",
-			     rxq->queue_id, rxq->port_id);
-		adapter->rx_vec_allowed = false;
-	} else
-		ixgbe_rxq_vec_setup(rxq);
-
-	dev->data->rx_queues[queue_idx] = rxq;
-
-	ixgbe_reset_rx_queue(adapter, rxq);
-
-	return 0;
-}
-
-uint32_t
-ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#define IXGBE_RXQ_SCAN_INTERVAL 4
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_queue *rxq;
-	uint32_t desc = 0;
-
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-		return 0;
-	}
-
-	rxq = dev->data->rx_queues[rx_queue_id];
-	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
-
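-	/*
-	 * Scan the ring in steps of IXGBE_RXQ_SCAN_INTERVAL descriptors, so
-	 * the returned count is an approximation that is a multiple of the
-	 * scan interval.
-	 */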
-	while ((desc < rxq->nb_rx_desc) &&
-		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
-		desc += IXGBE_RXQ_SCAN_INTERVAL;
-		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
-		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
-			rxdp = &(rxq->rx_ring[rxq->rx_tail +
-				desc - rxq->nb_rx_desc]);
-	}
-
-	return desc;
-}
-
-int
-ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(offset >= rxq->nb_rx_desc))
-		return 0;
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
-}
-
-void
-ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
-{
-	unsigned i;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
-		if (txq != NULL) {
-			txq->ops->release_mbufs(txq);
-			txq->ops->reset(txq);
-		}
-	}
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
-		if (rxq != NULL) {
-			ixgbe_rx_queue_release_mbufs(rxq);
-			ixgbe_reset_rx_queue(adapter, rxq);
-		}
-	}
-}
-
-/*********************************************************************
- *
- *  Device RX/TX init functions
- *
- **********************************************************************/
-
-/**
- * Receive Side Scaling (RSS)
- * See section 7.1.2.8 in the following document:
- *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
- *
- * Principles:
- * The source and destination IP addresses of the IP header and the source
- * and destination ports of TCP/UDP headers, if any, of received packets are
- * hashed against a configurable random key to compute a 32-bit RSS hash result.
- * The seven (7) LSBs of the 32-bit hash result are used as an index into a
- * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
- * RSS output index which is used as the RX queue index in which to store the
- * received packets.
- * The following output is supplied in the RX write-back descriptor:
- *     - 32-bit result of the Microsoft RSS hash function,
- *     - 4-bit RSS type field.
- */
-
-/*
- * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
- * Used as the default key.
- */
-static uint8_t rss_intel_key[40] = {
-	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
-	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
-	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
-	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
-	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
-};
-
-static void
-ixgbe_rss_disable(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw;
-	uint32_t mrqc;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	mrqc &= ~IXGBE_MRQC_RSSEN;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-}
-
-static void
-ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
-{
-	uint8_t  *hash_key;
-	uint32_t mrqc;
-	uint32_t rss_key;
-	uint64_t rss_hf;
-	uint16_t i;
-
-	hash_key = rss_conf->rss_key;
-	if (hash_key != NULL) {
-		/* Fill in RSS hash key */
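-		/*
-		 * Each of the ten RSSRK registers holds four bytes of the
-		 * 40-byte key, packed least-significant byte first.
-		 */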
-		for (i = 0; i < 10; i++) {
-			rss_key  = hash_key[(i * 4)];
-			rss_key |= hash_key[(i * 4) + 1] << 8;
-			rss_key |= hash_key[(i * 4) + 2] << 16;
-			rss_key |= hash_key[(i * 4) + 3] << 24;
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
-		}
-	}
-
-	/* Set configured hashing protocols in MRQC register */
-	rss_hf = rss_conf->rss_hf;
-	mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
-	if (rss_hf & ETH_RSS_IPV4)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
-	if (rss_hf & ETH_RSS_IPV6)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
-	if (rss_hf & ETH_RSS_IPV6_EX)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
-	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
-	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
-	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
-		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-}
-
-int
-ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
-			  struct rte_eth_rss_conf *rss_conf)
-{
-	struct ixgbe_hw *hw;
-	uint32_t mrqc;
-	uint64_t rss_hf;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/*
-	 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
-	 *     "RSS enabling cannot be done dynamically while it must be
-	 *      preceded by a software reset"
-	 * Before changing anything, first check that the update RSS operation
-	 * does not attempt to disable RSS, if RSS was enabled at
-	 * initialization time, or does not attempt to enable RSS, if RSS was
-	 * disabled at initialization time.
-	 */
-	rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
-		if (rss_hf != 0) /* Enable RSS */
-			return -(EINVAL);
-		return 0; /* Nothing to do */
-	}
-	/* RSS enabled */
-	if (rss_hf == 0) /* Disable RSS */
-		return -(EINVAL);
-	ixgbe_hw_rss_hash_set(hw, rss_conf);
-	return 0;
-}
-
-int
-ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf)
-{
-	struct ixgbe_hw *hw;
-	uint8_t *hash_key;
-	uint32_t mrqc;
-	uint32_t rss_key;
-	uint64_t rss_hf;
-	uint16_t i;
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	hash_key = rss_conf->rss_key;
-	if (hash_key != NULL) {
-		/* Return RSS hash key */
-		for (i = 0; i < 10; i++) {
-			rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
-			hash_key[(i * 4)] = rss_key & 0x000000FF;
-			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
-			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
-			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
-		}
-	}
-
-	/* Get RSS functions configured in MRQC register */
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
-		rss_conf->rss_hf = 0;
-		return 0;
-	}
-	rss_hf = 0;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
-		rss_hf |= ETH_RSS_IPV4;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
-		rss_hf |= ETH_RSS_IPV6;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
-		rss_hf |= ETH_RSS_IPV6_EX;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
-		rss_hf |= ETH_RSS_IPV6_TCP_EX;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
-		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-	if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
-		rss_hf |= ETH_RSS_IPV6_UDP_EX;
-	rss_conf->rss_hf = rss_hf;
-	return 0;
-}
-
-static void
-ixgbe_rss_configure(struct rte_eth_dev *dev)
-{
-	struct rte_eth_rss_conf rss_conf;
-	struct ixgbe_hw *hw;
-	uint32_t reta;
-	uint16_t i;
-	uint16_t j;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/*
-	 * Fill in redirection table
-	 * The byte-swap is needed because NIC registers are in
-	 * little-endian order.
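-	 * Each 32-bit RETA register packs four one-byte entries, so a
-	 * register write is issued once every fourth iteration.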
-	 */
-	reta = 0;
-	for (i = 0, j = 0; i < 128; i++, j++) {
-		if (j == dev->data->nb_rx_queues)
-			j = 0;
-		reta = (reta << 8) | j;
-		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
-					rte_bswap32(reta));
-	}
-
-	/*
-	 * Configure the RSS key and the RSS protocols used to compute
-	 * the RSS hash of input packets.
-	 */
-	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
-	if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
-		ixgbe_rss_disable(dev);
-		return;
-	}
-	if (rss_conf.rss_key == NULL)
-		rss_conf.rss_key = rss_intel_key; /* Default hash key */
-	ixgbe_hw_rss_hash_set(hw, &rss_conf);
-}
-
-#define NUM_VFTA_REGISTERS 128
-#define NIC_RX_BUFFER_SIZE 0x200
-
-static void
-ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
-{
-	struct rte_eth_vmdq_dcb_conf *cfg;
-	struct ixgbe_hw *hw;
-	enum rte_eth_nb_pools num_pools;
-	uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
-	uint16_t pbsize;
-	uint8_t nb_tcs; /* number of traffic classes */
-	int i;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-	num_pools = cfg->nb_queue_pools;
-	/* Check we have a valid number of pools */
-	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
-		ixgbe_rss_disable(dev);
-		return;
-	}
-	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
-	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
-
-	/*
-	 * RXPBSIZE
-	 * split rx buffer up into sections, each for 1 traffic class
-	 */
-	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
-	for (i = 0 ; i < nb_tcs; i++) {
-		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
-		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
-		/* clear 10 bits. */
-		rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
-	}
-	/* zero alloc all unused TCs */
-	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
-		rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
-		/* clear 10 bits. */
-		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
-	}
-
-	/* MRQC: enable vmdq and dcb */
-	mrqc = ((num_pools == ETH_16_POOLS) ? \
-		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
-	/* PFVTCTL: turn on virtualisation and set the default pool */
-	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
-	if (cfg->enable_default_pool) {
-		vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
-	} else {
-		vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
-
-	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
-	queue_mapping = 0;
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
-		/*
-		 * mapping is done with 3 bits per priority,
-		 * so shift by i*3 each time
-		 */
-		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
-
-	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
-
-	/* RTRPCS: DCB related */
-	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
-
-	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
-	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-
-	/* VFTA - enable all vlan filters */
-	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-	}
-
-	/* VFRE: pool enabling for receive - 16 or 32 */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
-			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
-
-	/*
-	 * MPSAR - allow pools to read specific mac addresses
-	 * In this case, all pools should be able to read from mac addr 0
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
-
-	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
-	for (i = 0; i < cfg->nb_pool_maps; i++) {
-		/* set vlan id in VF register and set the valid bit */
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
-				(cfg->pool_map[i].vlan_id & 0xFFF)));
-		/*
-		 * Put the allowed pools in VFB reg. As we only have 16 or 32
-		 * pools, we only need to use the first half of the register
-		 * i.e. bits 0-31
-		 */
-		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
-	}
-}
-
-/**
- * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- */
-static void
-ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
-               struct ixgbe_dcb_config *dcb_config)
-{
-	uint32_t reg;
-	uint32_t q;
-
-	PMD_INIT_FUNC_TRACE();
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		/* Disable the Tx desc arbiter so that MTQC can be changed */
-		reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-		reg |= IXGBE_RTTDCS_ARBDIS;
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-		/* Enable DCB for Tx with 8 TCs */
-		if (dcb_config->num_tcs.pg_tcs == 8) {
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-		}
-		else {
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
-		}
-		if (dcb_config->vt_mode)
-			reg |= IXGBE_MTQC_VT_ENA;
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
-		/* Disable drop for all queues */
-		for (q = 0; q < 128; q++)
-			IXGBE_WRITE_REG(hw, IXGBE_QDE,
-				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
-
-		/* Enable the Tx desc arbiter */
-		reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-		reg &= ~IXGBE_RTTDCS_ARBDIS;
-		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-		/* Enable Security TX Buffer IFG for DCB */
-		reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-		reg |= IXGBE_SECTX_DCB;
-		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-	}
-	return;
-}
-
-/**
- * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
- * @dev: pointer to rte_eth_dev structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- */
-static void
-ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
-			struct ixgbe_dcb_config *dcb_config)
-{
-	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
-			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
-	struct ixgbe_hw *hw =
-			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	PMD_INIT_FUNC_TRACE();
-	if (hw->mac.type != ixgbe_mac_82598EB)
-		/*PF VF Transmit Enable*/
-		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
-			vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
-
-	/*Configure general DCB TX parameters*/
-	ixgbe_dcb_tx_hw_config(hw,dcb_config);
-	return;
-}
-
-static void
-ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
-                        struct ixgbe_dcb_config *dcb_config)
-{
-	struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
-			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
-
-	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
-	}
-	else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
-	}
-	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_rx_conf->dcb_queue[i];
-		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-						(uint8_t)(1 << j);
-	}
-}
-
-static void
-ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
-                        struct ixgbe_dcb_config *dcb_config)
-{
-	struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
-			&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
-	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
-
-	/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
-	if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
-		dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
-	}
-	else {
-		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
-		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
-	}
-
-	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_tx_conf->dcb_queue[i];
-		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-						(uint8_t)(1 << j);
-	}
-	return;
-}
-
-static void
-ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
-		struct ixgbe_dcb_config *dcb_config)
-{
-	struct rte_eth_dcb_rx_conf *rx_conf =
-			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
-
-	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
-	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
-
-	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = rx_conf->dcb_queue[i];
-		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-						(uint8_t)(1 << j);
-	}
-}
-
-static void
-ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
-		struct ixgbe_dcb_config *dcb_config)
-{
-	struct rte_eth_dcb_tx_conf *tx_conf =
-			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
-	struct ixgbe_dcb_tc_config *tc;
-	uint8_t i,j;
-
-	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
-	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
-
-	/* User Priority to Traffic Class mapping */
-	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = tx_conf->dcb_queue[i];
-		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-						(uint8_t)(1 << j);
-	}
-}
-
-/**
- * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- */
-static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
-               struct ixgbe_dcb_config *dcb_config)
-{
-	uint32_t reg;
-	uint32_t vlanctrl;
-	uint8_t i;
-
-	PMD_INIT_FUNC_TRACE();
-	/*
-	 * Disable the arbiter before changing parameters
-	 * (always enable recycle mode; WSP)
-	 */
-	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
-		if (dcb_config->num_tcs.pg_tcs == 4) {
-			if (dcb_config->vt_mode)
-				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-					IXGBE_MRQC_VMDQRT4TCEN;
-			else {
-				IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
-				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-					IXGBE_MRQC_RT4TCEN;
-			}
-		}
-		if (dcb_config->num_tcs.pg_tcs == 8) {
-			if (dcb_config->vt_mode)
-				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-					IXGBE_MRQC_VMDQRT8TCEN;
-			else {
-				IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
-				reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
-					IXGBE_MRQC_RT8TCEN;
-			}
-		}
-
-		IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
-	}
-
-	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
-	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-
-	/* VFTA - enable all vlan filters */
-	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-	}
-
-	/*
-	 * Configure Rx packet plane (recycle mode; WSP) and
-	 * enable arbiter
-	 */
-	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
-	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
-	return;
-}
-
-static void
-ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
-			uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
-{
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
-						  tsa, map);
-		break;
-	default:
-		break;
-	}
-}
-
-static void
-ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
-			    uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
-{
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
-		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
-		break;
-	default:
-		break;
-	}
-}
-
-#define DCB_RX_CONFIG  1
-#define DCB_TX_CONFIG  1
-#define DCB_TX_PB      1024
-/**
- * ixgbe_dcb_hw_configure - Enable DCB and configure general DCB
- * parameters, in both VT and non-VT mode
- * @dev: pointer to rte_eth_dev structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- */
-static int
-ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
-			struct ixgbe_dcb_config *dcb_config)
-{
-	int     ret = 0;
-	uint8_t i,pfc_en,nb_tcs;
-	uint16_t pbsize;
-	uint8_t config_dcb_rx = 0;
-	uint8_t config_dcb_tx = 0;
-	uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
-	uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
-	uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
-	uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
-	uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
-	struct ixgbe_dcb_tc_config *tc;
-	uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-	struct ixgbe_hw *hw =
-			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	switch(dev->data->dev_conf.rxmode.mq_mode){
-	case ETH_MQ_RX_VMDQ_DCB:
-		dcb_config->vt_mode = true;
-		if (hw->mac.type != ixgbe_mac_82598EB) {
-			config_dcb_rx = DCB_RX_CONFIG;
-			/*
-			 * get DCB and VT RX configuration parameters
-			 * from rte_eth_conf
-			 */
-			ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
-			/*Configure general VMDQ and DCB RX parameters*/
-			ixgbe_vmdq_dcb_configure(dev);
-		}
-		break;
-	case ETH_MQ_RX_DCB:
-		dcb_config->vt_mode = false;
-		config_dcb_rx = DCB_RX_CONFIG;
-		/* Get DCB RX configuration parameters from rte_eth_conf */
-		ixgbe_dcb_rx_config(dev,dcb_config);
-		/*Configure general DCB RX parameters*/
-		ixgbe_dcb_rx_hw_config(hw, dcb_config);
-		break;
-	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
-		break;
-	}
-	switch (dev->data->dev_conf.txmode.mq_mode) {
-	case ETH_MQ_TX_VMDQ_DCB:
-		dcb_config->vt_mode = true;
-		config_dcb_tx = DCB_TX_CONFIG;
-		/* get DCB and VT TX configuration parameters from rte_eth_conf */
-		ixgbe_dcb_vt_tx_config(dev,dcb_config);
-		/*Configure general VMDQ and DCB TX parameters*/
-		ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
-		break;
-
-	case ETH_MQ_TX_DCB:
-		dcb_config->vt_mode = false;
-		config_dcb_tx = DCB_TX_CONFIG;
-		/*get DCB TX configuration parameters from rte_eth_conf*/
-		ixgbe_dcb_tx_config(dev,dcb_config);
-		/*Configure general DCB TX parameters*/
-		ixgbe_dcb_tx_hw_config(hw, dcb_config);
-		break;
-	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
-		break;
-	}
-
-	nb_tcs = dcb_config->num_tcs.pfc_tcs;
-	/* Unpack map */
-	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
-	if(nb_tcs == ETH_4_TCS) {
-		/* Avoid un-configured priority mapping to TC0 */
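-		/*
-		 * The first loop below clears, in an all-ones mask, the bit of
-		 * each TC already used by priorities 0-3; the second loop then
-		 * maps priorities 4-7 onto the TCs that remain unused.
-		 */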
-		uint8_t j = 4;
-		uint8_t mask = 0xFF;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
-			mask = (uint8_t)(mask & (~ (1 << map[i])));
-		for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
-			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
-				map[j++] = i;
-			mask >>= 1;
-		}
-		/* Re-configure 4 TCs BW */
-		for (i = 0; i < nb_tcs; i++) {
-			tc = &dcb_config->tc_config[i];
-			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
-						(uint8_t)(100 / nb_tcs);
-			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
-						(uint8_t)(100 / nb_tcs);
-		}
-		for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-			tc = &dcb_config->tc_config[i];
-			tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
-			tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
-		}
-	}
-
-	if(config_dcb_rx) {
-		/* Set RX buffer size */
-		pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
-		uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
-		for (i = 0 ; i < nb_tcs; i++) {
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
-		}
-		/* zero alloc all unused TCs */
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-		}
-	}
-	if(config_dcb_tx) {
-		/* Only support an equally distributed Tx packet buffer strategy. */
-		uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
-		uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
-		for (i = 0; i < nb_tcs; i++) {
-			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
-			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
-		}
-		/* Clear unused TCs, if any, to zero buffer size*/
-		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
-			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
-		}
-	}
-
-	/*Calculates traffic class credits*/
-	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
-				IXGBE_DCB_TX_CONFIG);
-	ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
-				IXGBE_DCB_RX_CONFIG);
-
-	if(config_dcb_rx) {
-		/* Unpack CEE standard containers */
-		ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
-		ixgbe_dcb_unpack_max_cee(dcb_config, max);
-		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
-		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
-		/* Configure PG(ETS) RX */
-		ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
-	}
-
-	if(config_dcb_tx) {
-		/* Unpack CEE standard containers */
-		ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
-		ixgbe_dcb_unpack_max_cee(dcb_config, max);
-		ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
-		ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
-		/* Configure PG(ETS) TX */
-		ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
-	}
-
-	/*Configure queue statistics registers*/
-	ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
-
-	/* Check if the PFC is supported */
-	if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
-		pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
-		for (i = 0; i < nb_tcs; i++) {
-			/*
-			 * For example, with 8 TCs the default high_water is 48
-			 * and the default low_water is 16.
-			 */
-			hw->fc.high_water[i] = (pbsize * 3) / 4;
-			hw->fc.low_water[i] = pbsize / 4;
-			/* Enable pfc for this TC */
-			tc = &dcb_config->tc_config[i];
-			tc->pfc = ixgbe_dcb_pfc_enabled;
-		}
-		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
-		if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
-			pfc_en &= 0x0F;
-		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
-	}
-
-	return ret;
-}
-
-/**
- * ixgbe_configure_dcb - Configure DCB  Hardware
- * @dev: pointer to rte_eth_dev
- */
-void ixgbe_configure_dcb(struct rte_eth_dev *dev)
-{
-	struct ixgbe_dcb_config *dcb_cfg =
-			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* check support mq_mode for DCB */
-	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
-		return;
-
-	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
-		return;
-
-	/* Configure DCB hardware */
-	ixgbe_dcb_hw_configure(dev,dcb_cfg);
-
-	return;
-}
-
-/*
- * VMDq is only supported on 10 GbE NICs.
- */
-static void
-ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
-{
-	struct rte_eth_vmdq_rx_conf *cfg;
-	struct ixgbe_hw *hw;
-	enum rte_eth_nb_pools num_pools;
-	uint32_t mrqc, vt_ctl, vlanctrl;
-	uint32_t vmolr = 0;
-	int i;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
-	num_pools = cfg->nb_queue_pools;
-
-	ixgbe_rss_disable(dev);
-
-	/* MRQC: enable vmdq */
-	mrqc = IXGBE_MRQC_VMDQEN;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
-	/* PFVTCTL: turn on virtualisation and set the default pool */
-	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
-	if (cfg->enable_default_pool)
-		vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
-	else
-		vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
-
-	for (i = 0; i < (int)num_pools; i++) {
-		vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
-		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
-	}
-
-	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
-	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
-
-	/* VFTA - enable all vlan filters */
-	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
-
-	/* VFRE: pool enabling for receive - 64 */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
-	if (num_pools == ETH_64_POOLS)
-		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
-
-	/*
-	 * MPSAR - allow pools to read specific mac addresses
-	 * In this case, all pools should be able to read from mac addr 0
-	 */
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
-	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
-
-	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
-	for (i = 0; i < cfg->nb_pool_maps; i++) {
-		/* set vlan id in VF register and set the valid bit */
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
-				(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
-		/*
-		 * Put the allowed pools in the VLVFB register pair. With up to
-		 * 64 pools, either the low half (bits 0-31) or the high half
-		 * (bits 32-63) of the pair may need to be written.
-		 */
-		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
-			IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
-					(cfg->pool_map[i].pools & UINT32_MAX));
-		else
-			IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
-					((cfg->pool_map[i].pools >> 32) \
-					& UINT32_MAX));
-
-	}
-
-	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
-	if (cfg->enable_loop_back) {
-		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-		for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
-			IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
-	}
-
-	IXGBE_WRITE_FLUSH(hw);
-}
-
-/*
- * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
- * @hw: pointer to hardware structure
- */
-static void
-ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
-{
-	uint32_t reg;
-	uint32_t q;
-
-	PMD_INIT_FUNC_TRACE();
-	/*PF VF Transmit Enable*/
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
-
-	/* Disable the Tx desc arbiter so that MTQC can be changed */
-	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-	reg |= IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-	reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
-	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
-	/* Disable drop for all queues */
-	for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
-		IXGBE_WRITE_REG(hw, IXGBE_QDE,
-				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
-
-	/* Enable the Tx desc arbiter */
-	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-	reg &= ~IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
-	IXGBE_WRITE_FLUSH(hw);
-
-	return;
-}
-
-static int
-ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
-{
-	struct ixgbe_rx_entry *rxe = rxq->sw_ring;
-	uint64_t dma_addr;
-	unsigned i;
-
-	/* Initialize software ring entries */
-	for (i = 0; i < rxq->nb_rx_desc; i++) {
-		volatile union ixgbe_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
-		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
-				     (unsigned) rxq->queue_id);
-			return (-ENOMEM);
-		}
-
-		rte_mbuf_refcnt_set(mbuf, 1);
-		mbuf->next = NULL;
-		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-		mbuf->nb_segs = 1;
-		mbuf->port = rxq->port_id;
-
-		dma_addr =
-			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
-		rxd = &rxq->rx_ring[i];
-		rxd->read.hdr_addr = dma_addr;
-		rxd->read.pkt_addr = dma_addr;
-		rxe[i].mbuf = mbuf;
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_config_vf_rss(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw;
-	uint32_t mrqc;
-
-	ixgbe_rss_configure(dev);
-
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* MRQC: enable VF RSS */
-	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
-	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
-	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
-		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
-		break;
-
-	case ETH_32_POOLS:
-		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
-		break;
-
-	default:
-		PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
-		return -EINVAL;
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
-
-	return 0;
-}
-
-static int
-ixgbe_config_vf_default(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	switch (RTE_ETH_DEV_SRIOV(dev).active) {
-	case ETH_64_POOLS:
-		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
-			IXGBE_MRQC_VMDQEN);
-		break;
-
-	case ETH_32_POOLS:
-		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
-			IXGBE_MRQC_VMDQRT4TCEN);
-		break;
-
-	case ETH_16_POOLS:
-		IXGBE_WRITE_REG(hw, IXGBE_MRQC,
-			IXGBE_MRQC_VMDQRT8TCEN);
-		break;
-	default:
-		PMD_INIT_LOG(ERR,
-			"invalid pool number in IOV mode");
-		break;
-	}
-	return 0;
-}
-
-static int
-ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return 0;
-
-	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
-		/*
-		 * SRIOV inactive scheme
-		 * any DCB/RSS w/o VMDq multi-queue setting
-		 */
-		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-			ixgbe_rss_configure(dev);
-			break;
-
-		case ETH_MQ_RX_VMDQ_DCB:
-			ixgbe_vmdq_dcb_configure(dev);
-			break;
-
-		case ETH_MQ_RX_VMDQ_ONLY:
-			ixgbe_vmdq_rx_hw_configure(dev);
-			break;
-
-		case ETH_MQ_RX_NONE:
-			/* if mq_mode is none, disable RSS mode. */
-		default:
-			ixgbe_rss_disable(dev);
-		}
-	} else {
-		/*
-		 * SRIOV active scheme
-		 * Support RSS together with VMDq & SRIOV
-		 */
-		switch (dev->data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-		case ETH_MQ_RX_VMDQ_RSS:
-			ixgbe_config_vf_rss(dev);
-			break;
-
-		/* FIXME: add support for DCB/RSS together with VMDq & SRIOV */
-		case ETH_MQ_RX_VMDQ_DCB:
-		case ETH_MQ_RX_VMDQ_DCB_RSS:
-			PMD_INIT_LOG(ERR,
-				"Could not support DCB with VMDq & SRIOV");
-			return -1;
-		default:
-			ixgbe_config_vf_default(dev);
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int
-ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t mtqc;
-	uint32_t rttdcs;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return 0;
-
-	/* disable arbiter before setting MTQC */
-	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
-	rttdcs |= IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
-
-	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
-		/*
-		 * SRIOV inactive scheme
-		 * any DCB w/o VMDq multi-queue setting
-		 */
-		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
-			ixgbe_vmdq_tx_hw_configure(hw);
-		else {
-			mtqc = IXGBE_MTQC_64Q_1PB;
-			IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
-		}
-	} else {
-		switch (RTE_ETH_DEV_SRIOV(dev).active) {
-
-		/*
-		 * SRIOV active scheme
-		 * FIXME: add support for DCB together with VMDq & SRIOV
-		 */
-		case ETH_64_POOLS:
-			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
-			break;
-		case ETH_32_POOLS:
-			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
-			break;
-		case ETH_16_POOLS:
-			mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
-				IXGBE_MTQC_8TC_8TQ;
-			break;
-		default:
-			mtqc = IXGBE_MTQC_64Q_1PB;
-			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
-	}
-
-	/* re-enable arbiter */
-	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
-	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
-
-	return 0;
-}
-
-/**
- * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
- *
- * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
- * spec rev. 3.0 chapter 8.2.3.8.13.
- *
- * @pool Memory pool of the Rx queue
- */
-static inline uint32_t
-ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
-{
-	struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
-
-	/* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
-	uint16_t maxdesc =
-		IPV4_MAX_PKT_LEN /
-			(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
-
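-	/* Pick the largest supported MAXDESC encoding not exceeding the value */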
-	if (maxdesc >= 16)
-		return IXGBE_RSCCTL_MAXDESC_16;
-	else if (maxdesc >= 8)
-		return IXGBE_RSCCTL_MAXDESC_8;
-	else if (maxdesc >= 4)
-		return IXGBE_RSCCTL_MAXDESC_4;
-	else
-		return IXGBE_RSCCTL_MAXDESC_1;
-}
-
-/**
- * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
- * interrupt
- *
- * (Taken from FreeBSD tree)
- * (yes this is all very magic and confusing :)
- *
- * @dev port handle
- * @entry the register array entry
- * @vector the MSIX vector for this queue
- * @type RX/TX/MISC
- */
-static void
-ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	u32 ivar, index;
-
-	vector |= IXGBE_IVAR_ALLOC_VAL;
-
-	switch (hw->mac.type) {
-
-	case ixgbe_mac_82598EB:
-		if (type == -1)
-			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
-		else
-			entry += (type * 64);
-		index = (entry >> 2) & 0x1F;
-		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
-		ivar &= ~(0xFF << (8 * (entry & 0x3)));
-		ivar |= (vector << (8 * (entry & 0x3)));
-		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
-		break;
-
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		if (type == -1) { /* MISC IVAR */
-			index = (entry & 1) * 8;
-			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
-			ivar &= ~(0xFF << index);
-			ivar |= (vector << index);
-			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
-		} else {	/* RX/TX IVARS */
-			index = (16 * (entry & 1)) + (8 * type);
-			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
-			ivar &= ~(0xFF << index);
-			ivar |= (vector << index);
-			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
-		}
-
-		break;
-
-	default:
-		break;
-	}
-}
-
-void ixgbe_set_rx_function(struct rte_eth_dev *dev)
-{
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
-
-	/*
-	 * Vector Rx can only be used if a few configuration conditions are
-	 * met and Rx Bulk Allocation is allowed.
-	 */
-	if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
-	    !adapter->rx_bulk_alloc_allowed) {
-		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
-				    "preconditions or RTE_IXGBE_INC_VECTOR is "
-				    "not enabled",
-			     dev->data->port_id);
-
-		adapter->rx_vec_allowed = false;
-	}
-
-	/*
-	 * Initialize the appropriate LRO callback.
-	 *
-	 * If all queues satisfy the bulk allocation preconditions
-	 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk
-	 * allocation. Otherwise use a single allocation version.
-	 */
-	if (dev->data->lro) {
-		if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
-					   "allocation version");
-			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
-		} else {
-			PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
-					   "allocation version");
-			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
-		}
-	} else if (dev->data->scattered_rx) {
-		/*
-		 * Set the non-LRO scattered callback: there are Vector and
-		 * single allocation versions.
-		 */
-		if (adapter->rx_vec_allowed) {
-			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
-					    "callback (port=%d).",
-				     dev->data->port_id);
-
-			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-		} else if (adapter->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
-					   "allocation callback (port=%d).",
-				     dev->data->port_id);
-			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
-		} else {
-			PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
-					    "single allocation) "
-					    "Scattered Rx callback "
-					    "(port=%d).",
-				     dev->data->port_id);
-
-			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
-		}
-	/*
-	 * Below we set "simple" callbacks according to port/queues parameters.
-	 * If parameters allow we are going to choose between the following
-	 * callbacks:
-	 *    - Vector
-	 *    - Bulk Allocation
-	 *    - Single buffer allocation (the simplest one)
-	 */
-	} else if (adapter->rx_vec_allowed) {
-		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
-				   "burst size no less than 32.");
-
-		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-	} else if (adapter->rx_bulk_alloc_allowed) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-				    "satisfied. Rx Burst Bulk Alloc function "
-				    "will be used on port=%d.",
-			     dev->data->port_id);
-
-		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-				    "satisfied, or Scattered Rx is requested, "
-				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
-				    "is not enabled (port=%d).",
-			     dev->data->port_id);
-
-		dev->rx_pkt_burst = ixgbe_recv_pkts;
-	}
-}
-
-/**
- * ixgbe_set_rsc - configure RSC related port HW registers
- *
- * Configures the port's RSC related registers according to chapter 4.6.7.2
- * of the 82599 Spec (x540 configuration is virtually the same).
- *
- * @dev port handle
- *
- * Returns 0 in case of success or a non-zero error code
- */
-static int
-ixgbe_set_rsc(struct rte_eth_dev *dev)
-{
-	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_dev_info dev_info = { 0 };
-	bool rsc_capable = false;
-	uint16_t i;
-	uint32_t rdrxctl;
-
-	/* Sanity check */
-	dev->dev_ops->dev_infos_get(dev, &dev_info);
-	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
-		rsc_capable = true;
-
-	if (!rsc_capable && rx_conf->enable_lro) {
-		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
-				   "support it");
-		return -EINVAL;
-	}
-
-	/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
-
-	if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
-		/*
-		 * According to chapter 4.6.7.2.1 of the Spec Rev. 3.0,
-		 * RSC configuration requires HW CRC stripping to be
-		 * enabled. If the user requested HW CRC stripping off
-		 * and RSC on, return an error.
-		 */
-		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
-				    "is disabled");
-		return -EINVAL;
-	}
-
-	/* RFCTL configuration  */
-	if (rsc_capable) {
-		uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-		if (rx_conf->enable_lro)
-			/*
-			 * Since NFS packet coalescing is not supported, clear
-			 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
-			 * enabled.
-			 */
-			rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
-				   IXGBE_RFCTL_NFSR_DIS);
-		else
-			rfctl |= IXGBE_RFCTL_RSC_DIS;
-
-		IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
-	}
-
-	/* If LRO hasn't been requested - we are done here. */
-	if (!rx_conf->enable_lro)
-		return 0;
-
-	/* Set RDRXCTL.RSCACKC bit */
-	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
-	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-
-	/* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
-		uint32_t srrctl =
-			IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
-		uint32_t rscctl =
-			IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
-		uint32_t psrtype =
-			IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
-		uint32_t eitr =
-			IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
-
-		/*
-		 * ixgbe PMD doesn't support header-split at the moment.
-		 *
-		 * Following chapter 4.6.7.2.1 of the 82599/x540
-		 * Spec, if RSC is enabled the SRRCTL[n].BSIZEHEADER
-		 * should be configured even if header split is not
-		 * enabled. We configure it to 128 bytes, following the
-		 * recommendation in the spec.
-		 */
-		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-		srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-					    IXGBE_SRRCTL_BSIZEHDR_MASK;
-
-		/*
-		 * TODO: Consider setting the Receive Descriptor Minimum
-		 * Threshold Size for an RSC case. This is not an obviously
-		 * beneficial option, but one worth considering...
-		 */
-
-		rscctl |= IXGBE_RSCCTL_RSCEN;
-		rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
-		psrtype |= IXGBE_PSRTYPE_TCPHDR;
-
-		/*
-		 * RSC: Set ITR interval corresponding to 2K ints/s.
-		 *
-		 * Full-sized RSC aggregations for a 10Gb/s link will
-		 * arrive at a rate of about 20K aggregations/s.
-		 *
-		 * A 2K ints/s rate means only about 10% of the
-		 * aggregations are closed by interrupt timer
-		 * expiration when streaming at wire speed.
-		 *
-		 * For a sparse streaming case this setting will yield
-		 * at most 500us latency for a single RSC aggregation.
-		 */
-		eitr &= ~IXGBE_EITR_ITR_INT_MASK;
-		eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
-
-		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
-		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
-		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
-		IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
-
-		/*
-		 * RSC requires the mapping of the queue to the
-		 * interrupt vector.
-		 */
-		ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
-	}
-
-	dev->data->lro = 1;
-
-	PMD_INIT_LOG(INFO, "enabling LRO mode");
-
-	return 0;
-}
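The EITR comment above compresses a bit of arithmetic. The sketch below works it through as a standalone check; the ~64KB full-aggregation size is an assumption used only for illustration and does not appear in the driver.

/* Back-of-envelope check of the "2K ints/s" EITR choice (illustrative only;
 * the 64KB aggregation size is assumed, not taken from the driver). */
#include <stdio.h>

int main(void)
{
	double link_bps   = 10e9;              /* 10Gb/s link */
	double aggr_bytes = 64.0 * 1024.0;     /* assumed full RSC aggregation */
	double aggr_per_s = link_bps / 8.0 / aggr_bytes;  /* ~19K aggregations/s */
	double itr_per_s  = 1e6 / 500.0;       /* 500us interval -> 2K ints/s */

	printf("aggregations/s ~= %.0f, closed by timer ~= %.0f%%\n",
	       aggr_per_s, 100.0 * itr_per_s / aggr_per_s);
	return 0;
}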
-
-/*
- * Initializes Receive Unit.
- */
-int
-ixgbe_dev_rx_init(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_rx_queue *rxq;
-	uint64_t bus_addr;
-	uint32_t rxctrl;
-	uint32_t fctrl;
-	uint32_t hlreg0;
-	uint32_t maxfrs;
-	uint32_t srrctl;
-	uint32_t rdrxctl;
-	uint32_t rxcsum;
-	uint16_t buf_size;
-	uint16_t i;
-	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
-	int rc;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/*
-	 * Make sure receives are disabled while setting
-	 * up the RX context (registers, descriptor rings, etc.).
-	 */
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-
-	/* Enable receipt of broadcast frames */
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl |= IXGBE_FCTRL_BAM;
-	fctrl |= IXGBE_FCTRL_DPF;
-	fctrl |= IXGBE_FCTRL_PMCF;
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
-	/*
-	 * Configure CRC stripping, if any.
-	 */
-	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (rx_conf->hw_strip_crc)
-		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
-	else
-		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
-
-	/*
-	 * Configure jumbo frame support, if any.
-	 */
-	if (rx_conf->jumbo_frame == 1) {
-		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
-		maxfrs &= 0x0000FFFF;
-		maxfrs |= (rx_conf->max_rx_pkt_len << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
-	} else
-		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
-
-	/*
-	 * If loopback mode is configured for 82599, set LPBK bit.
-	 */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-		hlreg0 |= IXGBE_HLREG0_LPBK;
-	else
-		hlreg0 &= ~IXGBE_HLREG0_LPBK;
-
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-
-	/* Setup RX queues */
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-
-		/*
-		 * Reset crc_len in case it was changed after queue setup by a
-		 * call to configure.
-		 */
-		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
-
-		/* Setup the Base and Length of the Rx Descriptor Rings */
-		bus_addr = rxq->rx_ring_phys_addr;
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
-				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
-				(uint32_t)(bus_addr >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
-				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
-
-		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (rx_conf->header_split) {
-			if (hw->mac.type == ixgbe_mac_82599EB) {
-				/* Must setup the PSRTYPE register */
-				uint32_t psrtype;
-				psrtype = IXGBE_PSRTYPE_TCPHDR |
-					IXGBE_PSRTYPE_UDPHDR   |
-					IXGBE_PSRTYPE_IPV4HDR  |
-					IXGBE_PSRTYPE_IPV6HDR;
-				IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
-			}
-			srrctl = ((rx_conf->split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-		/* Set if packets are dropped when no descriptors available */
-		if (rxq->drop_en)
-			srrctl |= IXGBE_SRRCTL_DROP_EN;
-
-		/*
-		 * Configure the RX buffer size in the BSIZEPACKET field of
-		 * the SRRCTL register of the queue.
-		 * The value is in 1 KB resolution. Valid values can be from
-		 * 1 KB to 16 KB.
-		 */
-		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-			RTE_PKTMBUF_HEADROOM);
-		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
-			   IXGBE_SRRCTL_BSIZEPKT_MASK);
-
-		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
-
-		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
-				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
-
-		/* It adds dual VLAN length for supporting dual VLAN */
-		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
-			dev->data->scattered_rx = 1;
-	}
-
-	if (rx_conf->enable_scatter)
-		dev->data->scattered_rx = 1;
-
-	/*
-	 * Device configured with multiple RX queues.
-	 */
-	ixgbe_dev_mq_rx_configure(dev);
-
-	/*
-	 * Setup the Checksum Register.
-	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
-	 * Enable IP/L4 checksum computation by hardware if requested to do so.
-	 */
-	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (rx_conf->hw_ip_checksum)
-		rxcsum |= IXGBE_RXCSUM_IPPCSE;
-	else
-		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540) {
-		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (rx_conf->hw_strip_crc)
-			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
-		else
-			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
-		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
-		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-	}
-
-	rc = ixgbe_set_rsc(dev);
-	if (rc)
-		return rc;
-
-	ixgbe_set_rx_function(dev);
-
-	return 0;
-}
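For reference, a minimal application-side sketch of a configuration that exercises the paths handled by ixgbe_dev_rx_init() above (LRO, jumbo frames, CRC stripping, checksum offload). The rxmode field names mirror the accesses in the function; the port id, frame size and queue counts are arbitrary example values.

#include <rte_ethdev.h>

/* Illustrative only; 4 Rx/4 Tx queues and a 9000-byte frame are examples. */
static int example_configure_rx(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.max_rx_pkt_len = 9000, /* programmed into MAXFRS above */
			.jumbo_frame    = 1,    /* sets HLREG0.JUMBOEN */
			.hw_strip_crc   = 1,    /* required when enable_lro is set */
			.hw_ip_checksum = 1,    /* sets RXCSUM.IPPCSE */
			.enable_lro     = 1,    /* enables the RSC path */
		},
	};

	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}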
-
-/*
- * Initializes Transmit Unit.
- */
-void
-ixgbe_dev_tx_init(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	uint64_t bus_addr;
-	uint32_t hlreg0;
-	uint32_t txctrl;
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Enable TX CRC (checksum offload requirement) and hw padding
-	 * (TSO requirement) */
-	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-
-	/* Setup the Base and Length of the Tx Descriptor Rings */
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-
-		bus_addr = txq->tx_ring_phys_addr;
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
-				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
-				(uint32_t)(bus_addr >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
-				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
-		/* Setup the HW Tx Head and TX Tail descriptor pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
-
-		/*
-		 * Disable Tx Head Writeback RO bit, since this hoses
-		 * bookkeeping if things aren't delivered in order.
-		 */
-		switch (hw->mac.type) {
-			case ixgbe_mac_82598EB:
-				txctrl = IXGBE_READ_REG(hw,
-							IXGBE_DCA_TXCTRL(txq->reg_idx));
-				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
-						txctrl);
-				break;
-
-			case ixgbe_mac_82599EB:
-			case ixgbe_mac_X540:
-			case ixgbe_mac_X550:
-			case ixgbe_mac_X550EM_x:
-			default:
-				txctrl = IXGBE_READ_REG(hw,
-						IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
-				txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-				IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
-						txctrl);
-				break;
-		}
-	}
-
-	/* Device configured with multiple TX queues. */
-	ixgbe_dev_mq_tx_configure(dev);
-}
-
-/*
- * Set up link for 82599 loopback mode Tx->Rx.
- */
-static inline void
-ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
-				IXGBE_SUCCESS) {
-			PMD_INIT_LOG(ERR, "Could not enable loopback mode");
-			/* ignore error */
-			return;
-		}
-	}
-
-	/* Restart link */
-	IXGBE_WRITE_REG(hw,
-			IXGBE_AUTOC,
-			IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
-	ixgbe_reset_pipeline_82599(hw);
-
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-	msec_delay(50);
-}
-
-
-/*
- * Start Transmit and Receive Units.
- */
-int
-ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	struct ixgbe_rx_queue *rxq;
-	uint32_t txdctl;
-	uint32_t dmatxctl;
-	uint32_t rxctrl;
-	uint16_t i;
-	int ret = 0;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		/* Setup Transmit Threshold Registers */
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-		txdctl |= txq->pthresh & 0x7F;
-		txdctl |= ((txq->hthresh & 0x7F) << 8);
-		txdctl |= ((txq->wthresh & 0x7F) << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-	}
-
-	if (hw->mac.type != ixgbe_mac_82598EB) {
-		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-		dmatxctl |= IXGBE_DMATXCTL_TE;
-		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
-	}
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		if (!txq->tx_deferred_start) {
-			ret = ixgbe_dev_tx_queue_start(dev, i);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		if (!rxq->rx_deferred_start) {
-			ret = ixgbe_dev_rx_queue_start(dev, i);
-			if (ret < 0)
-				return ret;
-		}
-	}
-
-	/* Enable Receive engine */
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		rxctrl |= IXGBE_RXCTRL_DMBYPS;
-	rxctrl |= IXGBE_RXCTRL_RXEN;
-	hw->mac.ops.enable_rx_dma(hw, rxctrl);
-
-	/* If loopback mode is enabled for 82599, set up the link accordingly */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-		ixgbe_setup_loopback_link_82599(hw);
-
-	return 0;
-}
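A short sketch of how the deferred-start flags honoured above are typically used from the application side, assuming rx_deferred_start was set in rte_eth_rxconf at queue-setup time and that the ethdev queue-start call reaches the PMD's rx_queue_start op; port and queue ids are example values.

#include <rte_ethdev.h>

static int example_deferred_start(uint8_t port_id)
{
	int ret;

	ret = rte_eth_dev_start(port_id);      /* deferred queues stay stopped */
	if (ret != 0)
		return ret;

	/* later, once the application is ready for traffic on that queue */
	return rte_eth_dev_rx_queue_start(port_id, 0);
}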
-
-/*
- * Start Receive Units for specified queue.
- */
-int
-ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_rx_queue *rxq;
-	uint32_t rxdctl;
-	int poll_ms;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-
-		/* Allocate buffers for descriptor rings */
-		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
-			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
-				     rx_queue_id);
-			return -1;
-		}
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
-
-		/* Wait until RX Enable ready */
-		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-		do {
-			rte_delay_ms(1);
-			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
-				     rx_queue_id);
-		rte_wmb();
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
-	} else
-		return -1;
-
-	return 0;
-}
-
-/*
- * Stop Receive Units for specified queue.
- */
-int
-ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
-	struct ixgbe_rx_queue *rxq;
-	uint32_t rxdctl;
-	int poll_ms;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
-
-		/* Wait until RX Enable ready */
-		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-		do {
-			rte_delay_ms(1);
-			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
-		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
-		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
-				     rx_queue_id);
-
-		rte_delay_us(RTE_IXGBE_WAIT_100_US);
-
-		ixgbe_rx_queue_release_mbufs(rxq);
-		ixgbe_reset_rx_queue(adapter, rxq);
-	} else
-		return -1;
-
-	return 0;
-}
-
-
-/*
- * Start Transmit Units for specified queue.
- */
-int
-ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	uint32_t txdctl;
-	int poll_ms;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		txq = dev->data->tx_queues[tx_queue_id];
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-
-		/* Wait until TX Enable ready */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-			do {
-				rte_delay_ms(1);
-				txdctl = IXGBE_READ_REG(hw,
-					IXGBE_TXDCTL(txq->reg_idx));
-			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
-			if (!poll_ms)
-				PMD_INIT_LOG(ERR, "Could not enable "
-					     "Tx Queue %d", tx_queue_id);
-		}
-		rte_wmb();
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
-	} else
-		return -1;
-
-	return 0;
-}
-
-/*
- * Stop Transmit Units for specified queue.
- */
-int
-ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	uint32_t txdctl;
-	uint32_t txtdh, txtdt;
-	int poll_ms;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		txq = dev->data->tx_queues[tx_queue_id];
-
-		/* Wait until TX queue is empty */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-			do {
-				rte_delay_us(RTE_IXGBE_WAIT_100_US);
-				txtdh = IXGBE_READ_REG(hw,
-						IXGBE_TDH(txq->reg_idx));
-				txtdt = IXGBE_READ_REG(hw,
-						IXGBE_TDT(txq->reg_idx));
-			} while (--poll_ms && (txtdh != txtdt));
-			if (!poll_ms)
-				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
-					     "when stopping.", tx_queue_id);
-		}
-
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-		txdctl &= ~IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-
-		/* Wait until TX Enable ready */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
-			do {
-				rte_delay_ms(1);
-				txdctl = IXGBE_READ_REG(hw,
-						IXGBE_TXDCTL(txq->reg_idx));
-			} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
-			if (!poll_ms)
-				PMD_INIT_LOG(ERR, "Could not disable "
-					     "Tx Queue %d", tx_queue_id);
-		}
-
-		if (txq->ops != NULL) {
-			txq->ops->release_mbufs(txq);
-			txq->ops->reset(txq);
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-/*
- * [VF] Initializes Receive Unit.
- */
-int
-ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_rx_queue *rxq;
-	uint64_t bus_addr;
-	uint32_t srrctl, psrtype = 0;
-	uint16_t buf_size;
-	uint16_t i;
-	int ret;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
-		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
-			"it should be a power of 2");
-		return -1;
-	}
-
-	if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
-		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
-			"it should be equal to or less than %d",
-			hw->mac.max_rx_queues);
-		return -1;
-	}
-
-	/*
-	 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
-	 * disables VF receipt of packets if the PF MTU is > 1500.
-	 * This is done to deal with the 82599 limitation that requires
-	 * the PF and all VFs to share the same MTU.
-	 * The PF driver then re-enables VF receipt of packets when the
-	 * VF driver issues an IXGBE_VF_SET_LPE request.
-	 * In the meantime, the VF device cannot be used, even if the VF driver
-	 * and the Guest VM network stack are ready to accept packets with a
-	 * size up to the PF MTU.
-	 * As a work-around to this PF behaviour, force the call to
-	 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
-	 * VF packet reception works in all cases.
-	 */
-	ixgbevf_rlpml_set_vf(hw,
-		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
-
-	/* Setup RX queues */
-	dev->rx_pkt_burst = ixgbe_recv_pkts;
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-
-		/* Allocate buffers for descriptor rings */
-		ret = ixgbe_alloc_rx_queue_mbufs(rxq);
-		if (ret)
-			return ret;
-
-		/* Setup the Base and Length of the Rx Descriptor Rings */
-		bus_addr = rxq->rx_ring_phys_addr;
-
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
-				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
-				(uint32_t)(bus_addr >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
-				rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
-
-
-		/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
-		/*
-		 * Configure Header Split
-		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
-			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
-				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-				IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-#endif
-			srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-		/* Set if packets are dropped when no descriptors available */
-		if (rxq->drop_en)
-			srrctl |= IXGBE_SRRCTL_DROP_EN;
-
-		/*
-		 * Configure the RX buffer size in the BSIZEPACKET field of
-		 * the SRRCTL register of the queue.
-		 * The value is in 1 KB resolution. Valid values can be from
-		 * 1 KB to 16 KB.
-		 */
-		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-			RTE_PKTMBUF_HEADROOM);
-		srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
-			   IXGBE_SRRCTL_BSIZEPKT_MASK);
-
-		/*
-		 * VF modification to write virtual function SRRCTL register
-		 */
-		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
-
-		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
-				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
-
-		if (dev->data->dev_conf.rxmode.enable_scatter ||
-		    /* It adds dual VLAN length for supporting dual VLAN */
-		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
-			if (!dev->data->scattered_rx)
-				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-			dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
-			if (rte_is_power_of_2(rxq->nb_rx_desc))
-				dev->rx_pkt_burst =
-					ixgbe_recv_scattered_pkts_vec;
-			else
-#endif
-				dev->rx_pkt_burst =
-					ixgbe_recv_pkts_lro_single_alloc;
-		}
-	}
-
-#ifdef RTE_HEADER_SPLIT_ENABLE
-	if (dev->data->dev_conf.rxmode.header_split)
-		/* Must setup the PSRTYPE register */
-		psrtype = IXGBE_PSRTYPE_TCPHDR |
-			IXGBE_PSRTYPE_UDPHDR   |
-			IXGBE_PSRTYPE_IPV4HDR  |
-			IXGBE_PSRTYPE_IPV6HDR;
-#endif
-
-	/* Set RQPL for VF RSS according to max Rx queue */
-	psrtype |= (dev->data->nb_rx_queues >> 1) <<
-		IXGBE_PSRTYPE_RQPL_SHIFT;
-	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
-
-	return 0;
-}
-
-/*
- * [VF] Initializes Transmit Unit.
- */
-void
-ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	uint64_t bus_addr;
-	uint32_t txctrl;
-	uint16_t i;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	/* Setup the Base and Length of the Tx Descriptor Rings */
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		bus_addr = txq->tx_ring_phys_addr;
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
-				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
-				(uint32_t)(bus_addr >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
-				txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
-		/* Setup the HW Tx Head and TX Tail descriptor pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
-
-		/*
-		 * Disable Tx Head Writeback RO bit, since this hoses
-		 * bookkeeping if things aren't delivered in order.
-		 */
-		txctrl = IXGBE_READ_REG(hw,
-				IXGBE_VFDCA_TXCTRL(i));
-		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
-				txctrl);
-	}
-}
-
-/*
- * [VF] Start Transmit and Receive Units.
- */
-void
-ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
-{
-	struct ixgbe_hw     *hw;
-	struct ixgbe_tx_queue *txq;
-	struct ixgbe_rx_queue *rxq;
-	uint32_t txdctl;
-	uint32_t rxdctl;
-	uint16_t i;
-	int poll_ms;
-
-	PMD_INIT_FUNC_TRACE();
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		/* Setup Transmit Threshold Registers */
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
-		txdctl |= txq->pthresh & 0x7F;
-		txdctl |= ((txq->hthresh & 0x7F) << 8);
-		txdctl |= ((txq->wthresh & 0x7F) << 16);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
-	}
-
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-
-		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
-
-		poll_ms = 10;
-		/* Wait until TX Enable ready */
-		do {
-			rte_delay_ms(1);
-			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
-		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
-		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
-	}
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-
-		rxq = dev->data->rx_queues[i];
-
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
-		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
-
-		/* Wait until RX Enable ready */
-		poll_ms = 10;
-		do {
-			rte_delay_ms(1);
-			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
-		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
-		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
-		rte_wmb();
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
-
-	}
-}
-
-/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
-ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
-{
-	return -1;
-}
-
-uint16_t __attribute__((weak))
-ixgbe_recv_pkts_vec(
-	void __rte_unused *rx_queue,
-	struct rte_mbuf __rte_unused **rx_pkts,
-	uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-uint16_t __attribute__((weak))
-ixgbe_recv_scattered_pkts_vec(
-	void __rte_unused *rx_queue,
-	struct rte_mbuf __rte_unused **rx_pkts,
-	uint16_t __rte_unused nb_pkts)
-{
-	return 0;
-}
-
-int __attribute__((weak))
-ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
-{
-	return -1;
-}
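The weak stubs above let a non-vector build link without ixgbe_rxtx_vec.c. A generic illustration of the pattern (not driver code) is below.

/* stub.c: weak fallback used when the vector object file is not linked in */
int __attribute__((weak)) vec_setup(void)
{
	return -1;      /* vector path unavailable */
}

/* vec.c: strong definition; when linked in, it overrides the weak stub */
int vec_setup(void)
{
	return 0;
}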
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
deleted file mode 100644
index af36438..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ /dev/null
@@ -1,293 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _IXGBE_RXTX_H_
-#define _IXGBE_RXTX_H_
-
-
-#define RTE_PMD_IXGBE_TX_MAX_BURST 32
-#define RTE_PMD_IXGBE_RX_MAX_BURST 32
-
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_IXGBE_DESCS_PER_LOOP           4
-#elif defined(RTE_IXGBE_INC_VECTOR)
-#define RTE_IXGBE_DESCS_PER_LOOP           4
-#else
-#define RTE_IXGBE_DESCS_PER_LOOP           1
-#endif
-
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
-
-#ifdef RTE_IXGBE_INC_VECTOR
-#define RTE_IXGBE_VPMD_RX_BURST         32
-#define RTE_IXGBE_VPMD_TX_BURST         32
-#define RTE_IXGBE_RXQ_REARM_THRESH      RTE_IXGBE_VPMD_RX_BURST
-#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ    64
-#endif
-
-#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
-		    sizeof(union ixgbe_adv_rx_desc))
-
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p)  rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p)  do {} while(0)
-#endif
-
-#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS  10
-#define RTE_IXGBE_WAIT_100_US               100
-#define RTE_IXGBE_VMTXSW_REGISTER_COUNT     2
-
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct ixgbe_rx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-struct ixgbe_scattered_rx_entry {
-	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct ixgbe_tx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-	uint16_t next_id; /**< Index of next descriptor in ring. */
-	uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct ixgbe_tx_entry_v {
-	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct ixgbe_rx_queue {
-	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
-	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
-	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
-	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
-	struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
-	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
-	uint64_t            mbuf_initializer; /**< value to init mbufs */
-	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
-	uint16_t            rx_tail;  /**< current value of RDT register. */
-	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
-	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
-	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
-#ifdef RTE_IXGBE_INC_VECTOR
-	uint16_t            rxrearm_nb; /**< number of remaining descs to re-arm */
-	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
-#endif
-	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
-	uint16_t            queue_id; /**< RX queue index. */
-	uint16_t            reg_idx;  /**< RX queue register index. */
-	uint8_t             port_id;  /**< Device port identifier. */
-	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
-	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
-	uint8_t             rx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
-	struct rte_mbuf fake_mbuf;
-	/** hold packets to return to application */
-	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
-};
-
-/**
- * IXGBE CTX Constants
- */
-enum ixgbe_advctx_num {
-	IXGBE_CTX_0    = 0, /**< CTX0 */
-	IXGBE_CTX_1    = 1, /**< CTX1  */
-	IXGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
-};
-
-/** Offload features */
-union ixgbe_tx_offload {
-	uint64_t data;
-	struct {
-		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
-		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-		uint64_t tso_segsz:16; /**< TCP TSO segment size */
-		uint64_t vlan_tci:16;
-		/**< VLAN Tag Control Identifier (CPU order). */
-	};
-};
-
-/*
- * Compare mask for vlan_macip_len.data,
- * should be in sync with ixgbe_vlan_macip.f layout.
- */
-#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
-#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
-#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
-/** MAC+IP  length. */
-#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
-
-/**
- * Structure to check if new context need be built
- */
-
-struct ixgbe_advctx_info {
-	uint64_t flags;           /**< ol_flags for context build. */
-	/**< tx offload: vlan, tso, l2-l3-l4 lengths. */
-	union ixgbe_tx_offload tx_offload;
-	/** compare mask for tx offload. */
-	union ixgbe_tx_offload tx_offload_mask;
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct ixgbe_tx_queue {
-	/** TX ring virtual address. */
-	volatile union ixgbe_adv_tx_desc *tx_ring;
-	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
-	struct ixgbe_tx_entry *sw_ring;      /**< virtual address of SW ring. */
-	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
-	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
-	uint16_t            tx_tail;       /**< current value of TDT reg. */
-	uint16_t            tx_free_thresh;/**< minimum TX before freeing. */
-	/** Number of TX descriptors to use before RS bit is set. */
-	uint16_t            tx_rs_thresh;
-	/** Number of TX descriptors used since RS bit was set. */
-	uint16_t            nb_tx_used;
-	/** Index to last TX descriptor to have been cleaned. */
-	uint16_t            last_desc_cleaned;
-	/** Total number of TX descriptors ready to be allocated. */
-	uint16_t            nb_tx_free;
-	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
-	uint16_t tx_next_rs; /**< next desc to set RS bit */
-	uint16_t            queue_id;      /**< TX queue index. */
-	uint16_t            reg_idx;       /**< TX queue register index. */
-	uint8_t             port_id;       /**< Device port identifier. */
-	uint8_t             pthresh;       /**< Prefetch threshold register. */
-	uint8_t             hthresh;       /**< Host threshold register. */
-	uint8_t             wthresh;       /**< Write-back threshold reg. */
-	uint32_t txq_flags; /**< Holds flags for this TXq */
-	uint32_t            ctx_curr;      /**< Hardware context states. */
-	/** Hardware context0 history. */
-	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
-	const struct ixgbe_txq_ops *ops;       /**< txq ops */
-	uint8_t             tx_deferred_start; /**< not in global dev start. */
-};
-
-struct ixgbe_txq_ops {
-	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
-	void (*free_swring)(struct ixgbe_tx_queue *txq);
-	void (*reset)(struct ixgbe_tx_queue *txq);
-};
-
-/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- *  - ETH_TXQ_FLAGS_NOMULTSEGS
- *  - ETH_TXQ_FLAGS_NOVLANOFFL
- *  - ETH_TXQ_FLAGS_NOXSUMSCTP
- *  - ETH_TXQ_FLAGS_NOXSUMUDP
- *  - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-			    ETH_TXQ_FLAGS_NOOFFLOADS)
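As a usage sketch, a queue setup that satisfies IXGBE_SIMPLE_FLAGS and the tx_rs_thresh requirement described above so the simple Tx path can be chosen; the descriptor count and threshold are example figures only, passed through the standard rte_eth_tx_queue_setup()/rte_eth_txconf interface.

#include <rte_ethdev.h>

static int example_simple_txq(uint8_t port_id)
{
	struct rte_eth_txconf txconf = {
		.txq_flags    = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
		.tx_rs_thresh = 32,     /* >= RTE_PMD_IXGBE_TX_MAX_BURST */
	};

	/* queue 0, 512 descriptors: example values */
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id), &txconf);
}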
-
-/*
- * Populate descriptors with the following info:
- * 1.) buffer_addr = phys_addr + headroom
- * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
- * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
- */
-
-/* Defines for Tx descriptor */
-#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
-			 IXGBE_ADVTXD_DCMD_IFCS |\
-			 IXGBE_ADVTXD_DCMD_DEXT |\
-			 IXGBE_ADVTXD_DCMD_EOP)
-
-
-/* Takes an ethdev and a queue and sets up the tx function to be used based on
- * the queue parameters. Used in tx_queue_setup by primary process and then
- * in dev_init by secondary process when attaching to an existing ethdev.
- */
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
-
-/**
- * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
- *
- * Sets the callback based on the device parameters:
- *  - ixgbe_hw.rx_bulk_alloc_allowed
- *  - rte_eth_dev_data.scattered_rx
- *  - rte_eth_dev_data.lro
- *  - conditions checked in ixgbe_rx_vec_condition_check()
- *
- *  This means that the parameters above have to be configured prior to
- *  calling this function.
- *
- * @dev rte_eth_dev handle
- */
-void ixgbe_set_rx_function(struct rte_eth_dev *dev);
-
-uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts);
-uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
-		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
-int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
-
-#ifdef RTE_IXGBE_INC_VECTOR
-
-uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-
-#endif /* RTE_IXGBE_INC_VECTOR */
-#endif /* _IXGBE_RXTX_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
deleted file mode 100644
index abd10f6..0000000
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
+++ /dev/null
@@ -1,792 +0,0 @@ 
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-
-#include "ixgbe_ethdev.h"
-#include "ixgbe_rxtx.h"
-
-#include <tmmintrin.h>
-
-#ifndef __INTEL_COMPILER
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif
-
-static inline void
-ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
-{
-	int i;
-	uint16_t rx_id;
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
-	struct rte_mbuf *mb0, *mb1;
-	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
-			RTE_PKTMBUF_HEADROOM);
-	__m128i dma_addr0, dma_addr1;
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mb_pool,
-				 (void *)rxep,
-				 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
-		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
-		    rxq->nb_rx_desc) {
-			dma_addr0 = _mm_setzero_si128();
-			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
-				rxep[i].mbuf = &rxq->fake_mbuf;
-				_mm_store_si128((__m128i *)&rxdp[i].read,
-						dma_addr0);
-			}
-		}
-		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-			RTE_IXGBE_RXQ_REARM_THRESH;
-		return;
-	}
-
-	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
-	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
-		__m128i vaddr0, vaddr1;
-		uintptr_t p0, p1;
-
-		mb0 = rxep[0].mbuf;
-		mb1 = rxep[1].mbuf;
-
-		/*
-		 * Flush mbuf with pkt template.
-		 * Data to be rearmed is 6 bytes long.
-		 * Though, RX will overwrite ol_flags that are coming next
-		 * anyway. So overwrite whole 8 bytes with one load:
-		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
-		 */
-		p0 = (uintptr_t)&mb0->rearm_data;
-		*(uint64_t *)p0 = rxq->mbuf_initializer;
-		p1 = (uintptr_t)&mb1->rearm_data;
-		*(uint64_t *)p1 = rxq->mbuf_initializer;
-
-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
-		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
-
-		/* convert pa to dma_addr hdr/data */
-		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
-		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
-
-		/* add headroom to pa values */
-		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
-		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
-
-		/* flush desc with pa dma_addr */
-		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
-		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
-	}
-
-	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t) ((rxq->rxrearm_start == 0) ?
-			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
-}
-
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when it is not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet.
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
-#define OLFLAGS_MASK     ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
-				     PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
-				     PKT_RX_IPV6_HDR_EXT))
-#define OLFLAGS_MASK_V   (((uint64_t)OLFLAGS_MASK << 48) | \
-			  ((uint64_t)OLFLAGS_MASK << 32) | \
-			  ((uint64_t)OLFLAGS_MASK << 16) | \
-			  ((uint64_t)OLFLAGS_MASK))
-#define PTYPE_SHIFT    (1)
-#define VTAG_SHIFT     (3)
-
-static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
-{
-	__m128i ptype0, ptype1, vtag0, vtag1;
-	union {
-		uint16_t e[4];
-		uint64_t dword;
-	} vol;
-
-	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
-	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
-	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
-	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
-
-	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
-	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
-
-	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
-	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
-
-	ptype1 = _mm_or_si128(ptype1, vtag1);
-	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;
-
-	rx_pkts[0]->ol_flags = vol.e[0];
-	rx_pkts[1]->ol_flags = vol.e[1];
-	rx_pkts[2]->ol_flags = vol.e[2];
-	rx_pkts[3]->ol_flags = vol.e[3];
-}
-#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-#endif
-
-/*
- * vPMD receive routine; only accepts (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
- *
- * Notice:
- * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packets
- * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
- *   DD bits
- * - don't support ol_flags for rss and csum err
- */
-static inline uint16_t
-_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts, uint8_t *split_packet)
-{
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_entry *sw_ring;
-	uint16_t nb_pkts_recd;
-	int pos;
-	uint64_t var;
-	__m128i shuf_msk;
-	__m128i crc_adjust = _mm_set_epi16(
-				0, 0, 0, 0, /* ignore non-length fields */
-				0,          /* ignore high-16bits of pkt_len */
-				-rxq->crc_len, /* sub crc on pkt_len */
-				-rxq->crc_len, /* sub crc on data_len */
-				0            /* ignore pkt_type field */
-			);
-	__m128i dd_check, eop_check;
-
-	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
-		return 0;
-
-	/* Just the act of getting into the function from the application is
-	 * going to cost about 7 cycles */
-	rxdp = rxq->rx_ring + rxq->rx_tail;
-
-	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
-
-	/* See if we need to rearm the RX queue - gives the prefetch a bit
-	 * of time to act */
-	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
-		ixgbe_rxq_rearm(rxq);
-
-	/* Before we start moving massive data around, check to see if
-	 * there is actually a packet available */
-	if (!(rxdp->wb.upper.status_error &
-				rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
-		return 0;
-
-	/* 4 packets DD mask */
-	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
-
-	/* 4 packets EOP mask */
-	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
-
-	/* mask to shuffle from desc. to mbuf */
-	shuf_msk = _mm_set_epi8(
-		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
-		0xFF, 0xFF,  /* skip high 16 bits vlan_macip, zero out */
-		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
-		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
-		13, 12,      /* octet 12~13, low 16 bits pkt_len */
-		13, 12,      /* octet 12~13, 16 bits data_len */
-		0xFF, 0xFF   /* skip pkt_type field */
-		);
-
-	/* Cache is empty -> need to scan the buffer rings, but first move
-	 * the next 'n' mbufs into the cache */
-	sw_ring = &rxq->sw_ring[rxq->rx_tail];
-
-	/*
-	 * A. load 4 packet in one loop
-	 * B. copy 4 mbuf point from swring to rx_pkts
-	 * C. calc the number of DD bits among the 4 packets
-	 * [C*. extract the end-of-packet bit, if requested]
-	 * D. fill info. from desc to mbuf
-	 */
-	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
-			pos += RTE_IXGBE_DESCS_PER_LOOP,
-			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
-		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
-		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
-		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
-		__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
-
-		if (split_packet) {
-			rte_prefetch0(&rx_pkts[pos]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
-		}
-
-		/* B.1 load 1 mbuf point */
-		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
-
-		/* Read desc statuses backwards to avoid race condition */
-		/* A.1 load 4 pkts desc */
-		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
-
-		/* B.2 copy 2 mbuf point into rx_pkts  */
-		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
-
-		/* B.1 load 1 mbuf point */
-		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
-
-		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
-		/* B.1 load 2 mbuf point */
-		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
-
-		/* B.2 copy 2 mbuf point into rx_pkts  */
-		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
-
-		/* avoid compiler reorder optimization */
-		rte_compiler_barrier();
-
-		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
-		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
-		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
-
-		/* C.1 4=>2 filter staterr info only */
-		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
-		/* C.1 4=>2 filter staterr info only */
-		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
-
-		/* set ol_flags with packet type and vlan tag */
-		desc_to_olflags_v(descs, &rx_pkts[pos]);
-
-		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
-		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
-		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
-
-		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
-		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
-
-		/* C.2 get 4 pkts staterr value  */
-		zero = _mm_xor_si128(dd_check, dd_check);
-		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
-
-		/* D.3 copy final 3,4 data to rx_pkts */
-		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
-				pkt_mb4);
-		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
-				pkt_mb3);
-
-		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
-		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
-		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
-
-		/* C* extract and record EOP bit */
-		if (split_packet) {
-			__m128i eop_shuf_mask = _mm_set_epi8(
-					0xFF, 0xFF, 0xFF, 0xFF,
-					0xFF, 0xFF, 0xFF, 0xFF,
-					0xFF, 0xFF, 0xFF, 0xFF,
-					0x04, 0x0C, 0x00, 0x08
-					);
-
-			/* and with mask to extract bits, flipping 1-0 */
-			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
-			/* the staterr values are not in order, as the count
-			 * of dd bits doesn't care. However, for end of
-			 * packet tracking, we do care, so shuffle. This also
-			 * compresses the 32-bit values to 8-bit */
-			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
-			/* store the resulting 32-bit value */
-			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
-			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
-			/* zero-out next pointers */
-			rx_pkts[pos]->next = NULL;
-			rx_pkts[pos + 1]->next = NULL;
-			rx_pkts[pos + 2]->next = NULL;
-			rx_pkts[pos + 3]->next = NULL;
-		}
-
-		/* C.3 calc available number of desc */
-		staterr = _mm_and_si128(staterr, dd_check);
-		staterr = _mm_packs_epi32(staterr, zero);
-
-		/* D.3 copy final 1,2 data to rx_pkts */
-		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
-				pkt_mb2);
-		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
-				pkt_mb1);
-
-		/* C.4 calc available number of desc */
-		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
-		nb_pkts_recd += var;
-		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
-			break;
-	}
-
-	/* Update our internal tail pointer */
-	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
-	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
-
-	return nb_pkts_recd;
-}
-
-/*
- * vPMD receive routine; only accepts (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
- *
- * Notice:
- * - if nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packets
- * - if nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
- *   DD bits
- * - don't support ol_flags for rss and csum err
- */
-uint16_t
-ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
-{
-	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
-}
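Given the notice above, a poll loop built on this routine should request exactly RTE_IXGBE_VPMD_RX_BURST (32) packets per call. A minimal sketch, with the port/queue ids and the free-only packet handling as placeholders:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void example_vec_rx_poll(uint8_t port_id)
{
	struct rte_mbuf *pkts[32];      /* == RTE_IXGBE_VPMD_RX_BURST */
	uint16_t n, i;

	for (;;) {
		n = rte_eth_rx_burst(port_id, 0, pkts, 32);
		for (i = 0; i < n; i++)
			rte_pktmbuf_free(pkts[i]);      /* placeholder handling */
	}
}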
-
-static inline uint16_t
-reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
-		uint16_t nb_bufs, uint8_t *split_flags)
-{
-	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
-	struct rte_mbuf *start = rxq->pkt_first_seg;
-	struct rte_mbuf *end =  rxq->pkt_last_seg;
-	unsigned pkt_idx, buf_idx;
-
-
-	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
-		if (end != NULL) {
-			/* processing a split packet */
-			end->next = rx_bufs[buf_idx];
-			rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
-			start->nb_segs++;
-			start->pkt_len += rx_bufs[buf_idx]->data_len;
-			end = end->next;
-
-			if (!split_flags[buf_idx]) {
-				/* it's the last packet of the set */
-				start->hash = end->hash;
-				start->ol_flags = end->ol_flags;
-				/* we need to strip crc for the whole packet */
-				start->pkt_len -= rxq->crc_len;
-				if (end->data_len > rxq->crc_len)
-					end->data_len -= rxq->crc_len;
-				else {
-					/* free up last mbuf */
-					struct rte_mbuf *secondlast = start;
-					while (secondlast->next != end)
-						secondlast = secondlast->next;
-					secondlast->data_len -= (rxq->crc_len -
-							end->data_len);
-					secondlast->next = NULL;
-					rte_pktmbuf_free_seg(end);
-					end = secondlast;
-				}
-				pkts[pkt_idx++] = start;
-				start = end = NULL;
-			}
-		} else {
-			/* not processing a split packet */
-			if (!split_flags[buf_idx]) {
-				/* not a split packet, save and skip */
-				pkts[pkt_idx++] = rx_bufs[buf_idx];
-				continue;
-			}
-			end = start = rx_bufs[buf_idx];
-			rx_bufs[buf_idx]->data_len += rxq->crc_len;
-			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
-		}
-	}
-
-	/* save the partial packet for next time */
-	rxq->pkt_first_seg = start;
-	rxq->pkt_last_seg = end;
-	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
-	return pkt_idx;
-}
-
-/*
- * vPMD receive routine that reassembles scattered packets
- *
- * Notice:
- * - don't support ol_flags for rss and csum err
- * - only accepts (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- */
-uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
-{
-	struct ixgbe_rx_queue *rxq = rx_queue;
-	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
-
-	/* get some new buffers */
-	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
-			split_flags);
-	if (nb_bufs == 0)
-		return 0;
-
-	/* happy day case, full burst + no packets to be joined */
-	const uint32_t *split_fl32 = (uint32_t *)split_flags;
-	if (rxq->pkt_first_seg == NULL &&
-			split_fl32[0] == 0 && split_fl32[1] == 0 &&
-			split_fl32[2] == 0 && split_fl32[3] == 0)
-		return nb_bufs;
-
-	/* reassemble any packets that need reassembly */
-	unsigned i = 0;
-	if (rxq->pkt_first_seg == NULL) {
-		/* find the first split flag, and only reassemble from there */
-		while (i < nb_bufs && !split_flags[i])
-			i++;
-		if (i == nb_bufs)
-			return nb_bufs;
-	}
-	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
-		&split_flags[i]);
-}
-
-static inline void
-vtx1(volatile union ixgbe_adv_tx_desc *txdp,
-		struct rte_mbuf *pkt, uint64_t flags)
-{
-	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
-			flags | pkt->data_len,
-			pkt->buf_physaddr + pkt->data_off);
-	_mm_store_si128((__m128i *)&txdp->read, descriptor);
-}
-
-static inline void
-vtx(volatile union ixgbe_adv_tx_desc *txdp,
-		struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
-{
-	int i;
-	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
-		vtx1(txdp, *pkt, flags);
-}
-
-static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
-{
-	struct ixgbe_tx_entry_v *txep;
-	uint32_t status;
-	uint32_t n;
-	uint32_t i;
-	int nb_free = 0;
-	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
-
-	/* check DD bit on threshold descriptor */
-	status = txq->tx_ring[txq->tx_next_dd].wb.status;
-	if (!(status & IXGBE_ADVTXD_STAT_DD))
-		return 0;
-
-	n = txq->tx_rs_thresh;
-
-	/*
-	 * first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
-			(n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
-	if (likely(m != NULL)) {
-		free[0] = m;
-		nb_free = 1;
-		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (likely(m != NULL)) {
-				if (likely(m->pool == free[0]->pool))
-					free[nb_free++] = m;
-				else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free, nb_free);
-					free[0] = m;
-					nb_free = 1;
-				}
-			}
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
-	} else {
-		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (m != NULL)
-				rte_mempool_put(m->pool, m);
-		}
-	}
-
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
-		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-	int i;
-	for (i = 0; i < (int)nb_pkts; ++i)
-		txep[i].mbuf = tx_pkts[i];
-}
-
-uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-		       uint16_t nb_pkts)
-{
-	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
-	volatile union ixgbe_adv_tx_desc *txdp;
-	struct ixgbe_tx_entry_v *txep;
-	uint16_t n, nb_commit, tx_id;
-	uint64_t flags = DCMD_DTYP_FLAGS;
-	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
-	int i;
-
-	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
-		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
-
-	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ixgbe_tx_free_bufs(txq);
-
-	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
-	if (unlikely(nb_pkts == 0))
-		return 0;
-
-	tx_id = txq->tx_tail;
-	txdp = &txq->tx_ring[tx_id];
-	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
-
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
-
-	n = (uint16_t)(txq->nb_tx_desc - tx_id);
-	if (nb_commit >= n) {
-
-		tx_backlog_entry(txep, tx_pkts, n);
-
-		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
-			vtx1(txdp, *tx_pkts, flags);
-
-		vtx1(txdp, *tx_pkts++, rs);
-
-		nb_commit = (uint16_t)(nb_commit - n);
-
-		tx_id = 0;
-		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
-		/* avoid reaching the end of the ring */
-		txdp = &(txq->tx_ring[tx_id]);
-		txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
-	}
-
-	tx_backlog_entry(txep, tx_pkts, nb_commit);
-
-	vtx(txdp, tx_pkts, nb_commit, flags);
-
-	tx_id = (uint16_t)(tx_id + nb_commit);
-	if (tx_id > txq->tx_next_rs) {
-		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
-			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
-		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
-			txq->tx_rs_thresh);
-	}
-
-	txq->tx_tail = tx_id;
-
-	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
-
-	return nb_pkts;
-}
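
For context, ixgbe_xmit_pkts_vec() above is not called directly by
applications: once the PMD installs it as the device's TX burst handler, it
is reached through the generic rte_eth_tx_burst() API. A small usage sketch
(the port and queue numbers are arbitrary examples):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Transmit a burst on port 0, queue 0; the PMD-selected TX function
 * (the vector path when enabled) fills the descriptors and writes the
 * tail register, returning the number of packets actually queued. */
static uint16_t
send_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	return rte_eth_tx_burst(0, 0, pkts, nb_pkts);
}
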
-
-static void
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
-{
-	unsigned i;
-	struct ixgbe_tx_entry_v *txe;
-	uint16_t nb_free, max_desc;
-
-	if (txq->sw_ring != NULL) {
-		/* release the used mbufs in sw_ring */
-		nb_free = txq->nb_tx_free;
-		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
-		     nb_free < max_desc && i != txq->tx_tail;
-		     i = (i + 1) & max_desc) {
-			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
-			if (txe->mbuf != NULL)
-				rte_pktmbuf_free_seg(txe->mbuf);
-		}
-		/* reset tx_entry */
-		for (i = 0; i < txq->nb_tx_desc; i++) {
-			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
-			txe->mbuf = NULL;
-		}
-	}
-}
-
-static void
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
-{
-	if (txq == NULL)
-		return;
-
-	if (txq->sw_ring != NULL) {
-		rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
-		txq->sw_ring = NULL;
-	}
-}
-
-static void
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
-{
-	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
-	struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
-	uint16_t i;
-
-	/* Zero out HW ring memory */
-	for (i = 0; i < txq->nb_tx_desc; i++)
-		txq->tx_ring[i] = zeroed_desc;
-
-	/* Initialize SW ring entries */
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
-		txe[i].mbuf = NULL;
-	}
-
-	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	txq->tx_tail = 0;
-	txq->nb_tx_used = 0;
-	/*
-	 * Always leave one descriptor unallocated to avoid
-	 * an H/W race condition
-	 */
-	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
-	txq->ctx_curr = 0;
-	memset((void *)&txq->ctx_cache, 0,
-		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
-}
-
-static const struct ixgbe_txq_ops vec_txq_ops = {
-	.release_mbufs = ixgbe_tx_queue_release_mbufs,
-	.free_swring = ixgbe_tx_free_swring,
-	.reset = ixgbe_reset_tx_queue,
-};
-
-int
-ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
-{
-	uintptr_t p;
-	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
-	mb_def.nb_segs = 1;
-	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
-	mb_def.port = rxq->port_id;
-	rte_mbuf_refcnt_set(&mb_def, 1);
-
-	/* prevent compiler reordering: rearm_data covers previous fields */
-	rte_compiler_barrier();
-	p = (uintptr_t)&mb_def.rearm_data;
-	rxq->mbuf_initializer = *(uint64_t *)p;
-	return 0;
-}
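
ixgbe_rxq_vec_setup() above captures the fields set on mb_def (data_off,
refcnt, nb_segs, port), which sit behind the rearm_data marker, as a single
64-bit template. During RX rearm that template can be written back into each
mbuf with one store; a small sketch of the idea (the helper name is
hypothetical):

#include <stdint.h>
#include <rte_mbuf.h>

/* Re-initialize the rearm_data fields of an mbuf in one 64-bit store
 * from the template cached at queue setup time. */
static inline void
mbuf_rearm(struct rte_mbuf *m, uint64_t mbuf_initializer)
{
	uintptr_t p = (uintptr_t)&m->rearm_data;

	*(uint64_t *)p = mbuf_initializer;
}
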
-
-int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
-{
-	if (txq->sw_ring == NULL)
-		return -1;
-
-	/* leave the first one for overflow */
-	txq->sw_ring = (struct ixgbe_tx_entry *)
-		((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
-	txq->ops = &vec_txq_ops;
-
-	return 0;
-}
-
-int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
-{
-#ifndef RTE_LIBRTE_IEEE1588
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
-	/* without rx ol_flags, no VP (VLAN Packet) flag report */
-	if (rxmode->hw_vlan_strip != 0 ||
-	    rxmode->hw_vlan_extend != 0)
-		return -1;
-#endif
-
-	/* no fdir support */
-	if (fconf->mode != RTE_FDIR_MODE_NONE)
-		return -1;
-
-	/*
-	 * - no csum error report support
-	 * - no header split support
-	 */
-	if (rxmode->hw_ip_checksum == 1 ||
-	    rxmode->header_split == 1)
-		return -1;
-
-	return 0;
-#else
-	RTE_SET_USED(dev);
-	return -1;
-#endif
-}
diff --git a/lib/librte_pmd_ixgbe/rte_pmd_ixgbe_version.map b/lib/librte_pmd_ixgbe/rte_pmd_ixgbe_version.map
deleted file mode 100644
index ef35398..0000000
--- a/lib/librte_pmd_ixgbe/rte_pmd_ixgbe_version.map
+++ /dev/null
@@ -1,4 +0,0 @@ 
-DPDK_2.0 {
-
-	local: *;
-};