[v3,08/10] net/cpfl: enable write back based on ITR expire
Checks
Commit Message
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queue.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 13 +++-
4 files changed, 92 insertions(+), 1 deletion(-)
Comments
> idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
> {
> diff --git a/drivers/common/idpf/idpf_common_device.h
> b/drivers/common/idpf/idpf_common_device.h
> index 112367dae8..f767ea7cec 100644
> --- a/drivers/common/idpf/idpf_common_device.h
> +++ b/drivers/common/idpf/idpf_common_device.h
> @@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
> struct virtchnl2_create_vport *vport_info);
> __rte_internal
> void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct
> virtchnl2_vport_stats *nes);
> +__rte_internal
> +int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
> + uint32_t *qids,
> + uint16_t nb_rx_queues);
>
> #endif /* _IDPF_COMMON_DEVICE_H_ */
> diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
> index 25624732b0..0729f6b912 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -69,6 +69,7 @@ INTERNAL {
> idpf_vport_info_init;
> idpf_vport_init;
> idpf_vport_irq_map_config;
> + idpf_vport_irq_map_config_by_qids;
> idpf_vport_irq_unmap_config;
> idpf_vport_rss_config;
> idpf_vport_stats_update;
Same comment as before — should the common (idpf) change be split out from the net (cpfl) change?
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index c2ab0690fc..3b480178c0 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
> static int
> cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
> {
> + uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
> struct cpfl_vport *cpfl_vport = dev->data->dev_private;
> struct idpf_vport *vport = &cpfl_vport->base;
> uint16_t nb_rx_queues = dev->data->nb_rx_queues;
> + struct cpfl_rx_queue *cpfl_rxq;
> + int i;
>
> - return idpf_vport_irq_map_config(vport, nb_rx_queues);
> + for (i = 0; i < nb_rx_queues; i++) {
> + cpfl_rxq = dev->data->rx_queues[i];
> + if (cpfl_rxq->hairpin_info.hairpin_q)
> + qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
> + (i - cpfl_vport->nb_data_rxq));
> + else
> + qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
Looks like cpfl_hw_qid_get is used across files — how about defining it as an inline function or a macro in a header file?
> + }
> + return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
> }
>
> /* Update hairpin_info for dev's tx hairpin queue */
> --
> 2.26.2
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
@@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */