@@ -91,6 +91,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
@@ -778,6 +778,12 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_vsi_queues_bind_intr(vsi);
i40e_vsi_enable_queues_intr(vsi);
+ /* enable FDIR MSIX interrupt */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+ }
+
/* Enable all queues which have been configured */
ret = i40e_vsi_switch_queues(vsi, TRUE);
if (ret != I40E_SUCCESS) {
@@ -2615,16 +2621,30 @@ i40e_vsi_setup(struct i40e_pf *pf,
case I40E_VSI_SRIOV :
vsi->nb_qps = pf->vf_nb_qps;
break;
+ case I40E_VSI_FDIR:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ break;
default:
goto fail_mem;
}
- ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
- vsi->seid, ret);
- goto fail_mem;
- }
- vsi->base_queue = ret;
+ /*
+ * The filter status descriptor is reported on rx queue 0, while
+ * the tx queue used for fdir filter programming has no such
+ * constraint and could be any queue. To keep it simple, the FDIR
+ * vsi uses queue pair 0. To make sure queue pair 0 is available
+ * for it, the queue allocation needs to be done before this
+ * function is called.
+ */
+ if (type != I40E_VSI_FDIR) {
+ ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+ vsi->seid, ret);
+ goto fail_mem;
+ }
+ vsi->base_queue = ret;
+ } else {
+ vsi->base_queue = I40E_FDIR_QUEUE_ID;
+ }
/* VF has MSIX interrupt in VF range, don't allocate here */
if (type != I40E_VSI_SRIOV) {
@@ -2756,9 +2776,25 @@ i40e_vsi_setup(struct i40e_pf *pf,
* Since VSI is not created yet, only configure parameter,
* will add vsi below.
*/
- }
- else {
- PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
+ } else if (type == I40E_VSI_FDIR) {
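+ /* The FDIR vsi shares the uplink of the given uplink vsi */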
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+ I40E_DEFAULT_TCMAP);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure "
+ "TC queue mapping.");
+ goto fail_msix_alloc;
+ }
+ ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+ } else {
+ PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet.");
goto fail_msix_alloc;
}
@@ -2943,8 +2979,16 @@ i40e_pf_setup(struct i40e_pf *pf)
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
-
- /* VSI setup */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ /* Allocate queue pair 0 first, so that FDIR can use it */
+ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+ if (ret != I40E_FDIR_QUEUE_ID) {
+ PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
+ " ret =%d", ret);
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+ /* main VSI setup */
vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
if (!vsi) {
PMD_DRV_LOG(ERR, "Setup of main vsi failed");
@@ -2954,9 +2998,20 @@ i40e_pf_setup(struct i40e_pf *pf)
dev_data->nb_rx_queues = vsi->nb_qps;
dev_data->nb_tx_queues = vsi->nb_qps;
+ /* Setup FDIR after the main VSI is created. */
+ if (pf->flags & I40E_FLAG_FDIR) {
+ ret = i40e_fdir_setup(pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to setup flow director.");
+ pf->flags &= ~I40E_FLAG_FDIR;
+ }
+ }
+
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
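+ /* Enable flow director in the hardware filter control */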
+ if (pf->flags & I40E_FLAG_FDIR)
+ settings.enable_fdir = TRUE;
/* Enable ethtype and macvlan filters */
settings.enable_ethtype = TRUE;
settings.enable_macvlan = TRUE;
@@ -46,11 +46,12 @@
/* number of VSIs and queue default setting */
#define I40E_MAX_QP_NUM_PER_VF 16
#define I40E_DEFAULT_QP_NUM_VMDQ 64
-#define I40E_DEFAULT_QP_NUM_FDIR 64
+#define I40E_DEFAULT_QP_NUM_FDIR 1
#define I40E_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE)
/* Default TC traffic in case DCB is not enabled */
#define I40E_DEFAULT_TCMAP 0x1
+#define I40E_FDIR_QUEUE_ID 0 /* queue pair 0 is used for FDIR programming */
/* i40e flags */
#define I40E_FLAG_RSS (1ULL << 0)
@@ -221,6 +222,27 @@ struct i40e_pf_vf {
};
/*
+ * A structure used to define FDIR related information.
+ */
+struct i40e_fdir_info {
+ struct i40e_vsi *fdir_vsi; /* pointer to fdir VSI structure */
+ uint16_t match_counter_index; /* Statistic counter index used for fdir */
+ struct i40e_tx_queue *txq;
+ struct i40e_rx_queue *rxq;
+ void *prg_pkt; /* memory for fdir program packet */
+ uint64_t dma_addr; /* physical address of the packet memory */
+ /*
+ * the rule describing how the byte stream is extracted as flexible
+ * payload; for each payload layer, the setting can have up to
+ * three elements
+ */
+ struct {
+ uint8_t offset; /* offset in words from the beginning of payload */
+ uint8_t size; /* size in words */
+ } flex_set[3][3];
+};
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -248,10 +270,10 @@ struct i40e_pf {
uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
uint16_t vf_nb_qps; /* The number of queue pairs of VF */
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
-
/* store VXLAN UDP ports */
uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
uint16_t vxlan_bitmap; /* Vxlan bit mask */
+ struct i40e_fdir_info fdir; /* flow director info */
};
enum pending_msg {
@@ -352,6 +374,11 @@ int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
uint64_t i40e_config_hena(uint64_t flags);
uint64_t i40e_parse_hena(uint64_t flags);
+enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf,
+ unsigned int socket_id);
+enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf,
+ unsigned int socket_id);
+int i40e_fdir_setup(struct i40e_pf *pf);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
new file mode 100644
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+
+#include "i40e_logs.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
+#define I40E_FDIR_PKT_LEN 512
+
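+/* Number of statistic counters per PF */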
+#define I40E_COUNTER_PF 2
+/* Statistic counter index for one pf */
+#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
+#define I40E_FLX_OFFSET_IN_FIELD_VECTOR 50
+
+static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
+
+static int
+i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+ struct i40e_hmc_obj_rxq rx_ctx;
+ int err = I40E_SUCCESS;
+
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ /* Init the RX queue in hardware */
+ rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = 0;
+ rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+ rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ rx_ctx.dsize = 1;
+#endif
+ rx_ctx.dtype = i40e_header_split_none;
+ rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+ rx_ctx.rxmax = ETHER_MAX_LEN;
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 0;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+ rx_ctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
+ return err;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
+ if (err != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
+ return err;
+ }
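+ /* Record the rx queue tail register address */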
+ rxq->qrx_tail = hw->hw_addr +
+ I40E_QRX_TAIL(rxq->vsi->base_queue);
+
+ rte_wmb();
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+ return err;
+}
+
+/*
+ * i40e_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+int
+i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ int err = I40E_SUCCESS;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz = NULL;
+ struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+
+ PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
+ " num_filters_best_effort = %u.",
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort);
+
+ vsi = pf->fdir.fdir_vsi;
+ if (vsi) {
+ PMD_DRV_LOG(ERR, "FDIR vsi pointer needs"
+ "to be null before creation.");
+ return I40E_ERR_BAD_PTR;
+ }
+ /* make new FDIR VSI */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+ return I40E_ERR_NO_AVAILABLE_VSI;
+ }
+ pf->fdir.fdir_vsi = vsi;
+
+ /* FDIR tx queue setup */
+ err = i40e_fdir_setup_tx_resources(pf, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+ goto fail_setup_tx;
+ }
+
+ /* FDIR rx queue setup */
+ err = i40e_fdir_setup_rx_resources(pf, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+ goto fail_setup_rx;
+ }
+
+ err = i40e_tx_queue_init(pf->fdir.txq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
+ goto fail_mem;
+ }
+
+ /* The tx queue needs to be switched on before dev start */
+ err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
+ goto fail_mem;
+ }
+
+ /* Init the rx queue in hardware */
+ err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
+ goto fail_mem;
+ }
+
+ /* switch on rx queue */
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
+ goto fail_mem;
+ }
+
+ /* reserve memory for the fdir programming packet */
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d",
+ eth_dev->driver->pci_drv.name,
+ I40E_FDIR_MZ_NAME,
+ eth_dev->data->port_id);
+ mz = rte_memzone_lookup(z_name);
+ if (!mz) {
+ mz = rte_memzone_reserve(z_name,
+ I40E_FDIR_PKT_LEN,
+ rte_socket_id(),
+ 0);
+ if (!mz) {
+ PMD_DRV_LOG(ERR, "Cannot init memzone for"
+ "flow director program packet.");
+ err = I40E_ERR_NO_MEMORY;
+ goto fail_mem;
+ }
+ }
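+ /* Record the programming packet buffer, its DMA address and counter index */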
+ pf->fdir.prg_pkt = mz->addr;
+ pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
+ pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
+ PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
+ vsi->base_queue);
+ return I40E_SUCCESS;
+
+fail_mem:
+ i40e_dev_rx_queue_release(pf->fdir.rxq);
+ pf->fdir.rxq = NULL;
+fail_setup_rx:
+ i40e_dev_tx_queue_release(pf->fdir.txq);
+ pf->fdir.txq = NULL;
+fail_setup_tx:
+ i40e_vsi_release(vsi);
+ pf->fdir.fdir_vsi = NULL;
+ return err;
+}
@@ -2150,6 +2150,8 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->nb_tx_desc;
tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
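+ /* Enable flow director on the tx queue of the FDIR vsi */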
+ if (vsi->type == I40E_VSI_FDIR)
+ tx_ctx.fd_ena = TRUE;
err = i40e_clear_lan_tx_queue_context(hw, pf_q);
if (err != I40E_SUCCESS) {
@@ -2366,3 +2368,128 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
i40e_reset_rx_queue(dev->data->rx_queues[i]);
}
}
+
+enum i40e_status_code
+i40e_fdir_setup_tx_resources(struct i40e_pf *pf,
+ unsigned int socket_id)
+{
+ struct i40e_tx_queue *txq;
+ const struct rte_memzone *tz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("i40e fdir tx queue",
+ sizeof(struct i40e_tx_queue),
+ CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ tz = i40e_ring_dma_zone_reserve(dev,
+ "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID,
+ ring_size,
+ socket_id);
+ if (!tz) {
+ i40e_dev_tx_queue_release(txq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
+ txq->queue_id = I40E_FDIR_QUEUE_ID;
+ txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ txq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#else
+ txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+#endif
+ txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+ /*
+ * no software ring needs to be allocated or reset for the fdir
+ * program queue, just mark the queue as configured.
+ */
+ txq->q_set = TRUE;
+ pf->fdir.txq = txq;
+
+ return I40E_SUCCESS;
+}
+
+enum i40e_status_code
+i40e_fdir_setup_rx_resources(struct i40e_pf *pf,
+ unsigned int socket_id)
+{
+ struct i40e_rx_queue *rxq;
+ const struct rte_memzone *rz = NULL;
+ uint32_t ring_size;
+ struct rte_eth_dev *dev;
+
+#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
+ if (!pf) {
+ PMD_DRV_LOG(ERR, "PF is not available");
+ return I40E_ERR_BAD_PTR;
+ }
+ dev = pf->adapter->eth_dev;
+
+ /* Allocate the RX queue data structure. */
+ rxq = rte_zmalloc_socket("i40e fdir rx queue",
+ sizeof(struct i40e_rx_queue),
+ CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+ "rx queue structure.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ /* Allocate RX hardware ring descriptors. */
+ ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
+ ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+ rz = i40e_ring_dma_zone_reserve(dev,
+ "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID,
+ ring_size,
+ socket_id);
+ if (!rz) {
+ i40e_dev_rx_queue_release(rxq);
+ PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
+ rxq->queue_id = I40E_FDIR_QUEUE_ID;
+ rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+ rxq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+ rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#else
+ rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+#endif
+ rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+ /*
+ * No software ring needs to be allocated or reset for the fdir
+ * rx queue, just mark the queue as configured.
+ */
+ rxq->q_set = TRUE;
+ pf->fdir.rxq = rxq;
+
+ return I40E_SUCCESS;
+}