@@ -105,5 +105,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_debug.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_regs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_sriov.c
include $(RTE_SDK)/mk/rte.lib.mk
@@ -11,6 +11,7 @@ sources = files(
'qede_rxtx.c',
'qede_debug.c',
'qede_regs.c',
+ 'qede_sriov.c',
)
if cc.has_argument('-Wno-format-nonliteral')
@@ -2700,6 +2700,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
adapter->ipgre.enable = false;
+ qed_ops->sriov_configure(edev, pci_dev->max_vfs);
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -34,6 +34,7 @@
#include "base/ecore_l2.h"
#include "base/ecore_vf.h"
+#include "qede_sriov.h"
#include "qede_logs.h"
#include "qede_if.h"
#include "qede_rxtx.h"
@@ -82,6 +82,7 @@ struct qed_eth_ops {
const struct qed_common_ops *common;
int (*fill_dev_info)(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
+ void (*sriov_configure)(struct ecore_dev *edev, int num_vfs);
};
struct qed_link_params {
@@ -822,6 +822,7 @@ const struct qed_common_ops qed_common_ops_pass = {
const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+ INIT_STRUCT_FIELD(sriov_configure, &qed_sriov_configure),
};
const struct qed_eth_ops *qed_get_eth_ops(void)
new file mode 100644
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020 Marvell.
+ * All rights reserved.
+ * www.marvell.com
+ */
+
+#include "qede_sriov.h"
+
+static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
+ u16 vfid,
+ struct ecore_iov_vf_init_params *params)
+{
+ u16 num_pf_l2_queues, base, i;
+
+ /* Since we have an equal resource distribution per-VF, and we assume
+ * PF has acquired its first queues, we start setting sequentially from
+ * there.
+ */
+ num_pf_l2_queues = (u16)FEAT_NUM(hwfn, ECORE_PF_L2_QUE); /* L2 queues reserved for the PF itself */
+
+ base = num_pf_l2_queues + vfid * params->num_queues; /* first absolute queue id of this VF's slice */
+ params->rel_vf_id = vfid;
+
+ for (i = 0; i < params->num_queues; i++) { /* request a contiguous rx/tx queue range */
+ params->req_rx_queue[i] = base + i;
+ params->req_tx_queue[i] = base + i;
+ }
+
+ /* PF uses indices 0 for itself; Set vport/RSS afterwards */
+ params->vport_id = vfid + 1; /* vport 0 and RSS engine 0 belong to the PF */
+ params->rss_eng_id = vfid + 1;
+}
+
+/* Initialize HW for 'num' VFs on every hwfn, splitting the VF L2
+ * queue budget evenly among them. Stops at the first VF that fails.
+ */
+static void qed_sriov_enable(struct ecore_dev *edev, int num)
+{
+	struct ecore_iov_vf_init_params params;
+	struct ecore_hwfn *p_hwfn;
+	struct ecore_ptt *p_ptt;
+	int i, j, rc;
+
+	if ((u32)num >= RESC_NUM(&edev->hwfns[0], ECORE_VPORT)) {
+		DP_NOTICE(edev, false, "Can start at most %d VFs\n",
+			  RESC_NUM(&edev->hwfns[0], ECORE_VPORT) - 1);
+		return; /* each VF needs a vport; one is reserved for the PF */
+	}
+
+	OSAL_MEMSET(&params, 0, sizeof(struct ecore_iov_vf_init_params));
+
+	for_each_hwfn(edev, j) {
+		int feat_num;
+
+		p_hwfn = &edev->hwfns[j];
+		p_ptt = ecore_ptt_acquire(p_hwfn);
+		feat_num = FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) / num; /* equal share per VF */
+
+		params.num_queues = OSAL_MIN_T(int, feat_num, 16); /* cap queues per VF at 16 */
+
+		for (i = 0; i < num; i++) {
+			if (!ecore_iov_is_valid_vfid(p_hwfn, i, false, true))
+				continue;
+
+			qed_sriov_enable_qid_config(p_hwfn, i, &params);
+
+			rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
+			if (rc) {
+				DP_ERR(edev, "Failed to enable VF[%d]\n", i);
+				ecore_ptt_release(p_hwfn, p_ptt);
+				return;
+			}
+		}
+
+		ecore_ptt_release(p_hwfn, p_ptt);
+	}
+}
+
+void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
+{
+ if (!IS_ECORE_SRIOV(edev)) { /* device exposes no SR-IOV capability */
+ DP_VERBOSE(edev, ECORE_MSG_IOV, "SR-IOV is not supported\n");
+ return;
+ }
+
+ if (num_vfs_param) /* zero VFs requested: nothing to enable */
+ qed_sriov_enable(edev, num_vfs_param);
+}
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020 Marvell.
+ * All rights reserved.
+ * www.marvell.com
+ */
+
+/* Guard against multiple inclusion. */
+#ifndef QEDE_SRIOV_H
+#define QEDE_SRIOV_H
+
+#include "qede_ethdev.h"
+
+void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param);
+
+#endif /* QEDE_SRIOV_H */