[v4,13/13] net/cpfl: support hairpin bind/unbind
Checks
Commit Message
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
drivers/net/cpfl/cpfl_rxtx.h | 2 +
3 files changed, 167 insertions(+)
Comments
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 26, 2023 3:39 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports hairpin_bind/unbind ops.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
> drivers/net/cpfl/cpfl_rxtx.h | 2 +
> 3 files changed, 167 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> d6dc1672f1..4b70441e27 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev
> *dev, uint16_t *peer_ports,
> return j;
> }
>
>
> static int
> diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> 38c48ad8c7..ef83a03c2b 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.c
> +++ b/drivers/net/cpfl/cpfl_rxtx.c
> @@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport
> *cpfl_vport, bool on)
> return err;
> }
>
> +int
> +cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on) {
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> +
> + return err;
> +}
> +
> +int
> +cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on) {
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> +
> + return err;
> +}
> +
[Liu, Mingxia] Can cpfl_switch_hairpin_bufq_complq() in patch 9/13 be optimized by calling cpfl_switch_hairpin_complq() and cpfl_switch_hairpin_bufq()?
> int
> cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> logic_qid,
> bool rx, bool on)
> diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> 42dfd07155..86e97541c4 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport,
> struct cpfl_tx_queue *cpfl int cpfl_hairpin_rx_bufq_config(struct cpfl_vport
> *cpfl_vport); int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct
> cpfl_rx_queue *cpfl_rxq); int cpfl_switch_hairpin_bufq_complq(struct
> cpfl_vport *cpfl_vport, bool on);
> +int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
> +int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
> int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
> bool rx, bool on);
> #endif /* _CPFL_RXTX_H_ */
> --
> 2.26.2
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Tuesday, May 30, 2023 12:00 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, May 26, 2023 3:39 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch supports hairpin_bind/unbind ops.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
> > drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
> > drivers/net/cpfl/cpfl_rxtx.h | 2 +
> > 3 files changed, 167 insertions(+)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index
> > d6dc1672f1..4b70441e27 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct
> rte_eth_dev
> > *dev, uint16_t *peer_ports,
> > return j;
> > }
> >
> >
> > static int
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c
> > b/drivers/net/cpfl/cpfl_rxtx.c index 38c48ad8c7..ef83a03c2b 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct
> > cpfl_vport *cpfl_vport, bool on)
> > return err;
> > }
> >
> > +int
> > +cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on) {
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +
> > + return err;
> > +}
> > +
> > +int
> > +cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on) {
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +
> > + return err;
> > +}
> > +
> [Liu, Mingxia] Can cpfl_switch_hairpin_bufq_complq() in patch 9/13 be
> optimized by calling cpfl_switch_hairpin_complq() and
> cpfl_switch_hairpin_bufq()?
Yes, the functions are duplicated; cpfl_switch_hairpin_bufq_complq() will be refined to call these two helpers in the next version.
> > int
> > cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport,
> > uint16_t logic_qid,
> > bool rx, bool on)
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h
> > b/drivers/net/cpfl/cpfl_rxtx.h index
> > 42dfd07155..86e97541c4 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport
> > *vport, struct cpfl_tx_queue *cpfl int
> > cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport); int
> > cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue
> > *cpfl_rxq); int cpfl_switch_hairpin_bufq_complq(struct
> > cpfl_vport *cpfl_vport, bool on);
> > +int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool
> > +on); int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool
> > +on);
> > int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> qid,
> > bool rx, bool on);
> > #endif /* _CPFL_RXTX_H_ */
> > --
> > 2.26.2
@@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+/* Bind the hairpin Tx queues of this port (dev) to the hairpin Rx queues
+ * of the peer port (rx_port): update Tx hairpin info, configure the Tx
+ * queues and completion queue, configure the peer's Rx queues and buffer
+ * queue, then enable everything via virtchnl.
+ *
+ * Returns 0 on success, a negative error code on the first failure.
+ * No rollback of already-enabled queues is attempted on failure.
+ */
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+	struct cpfl_vport *cpfl_rx_vport;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct rte_eth_dev *peer_dev;
+	struct idpf_vport *rx_vport;
+	int err = 0;
+	int i;
+
+	err = cpfl_txq_hairpin_info_update(dev, rx_port);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+		return err;
+	}
+
+	/* configure hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+			return err;
+		}
+	}
+
+	err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+		return err;
+	}
+
+	peer_dev = &rte_eth_devices[rx_port];
+	cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+	rx_vport = &cpfl_rx_vport->base;
+	cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+	err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+			return err;
+		}
+		err = cpfl_rx_queue_init(peer_dev, i);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+			return err;
+		}
+	}
+
+	/* enable hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+						     i - cpfl_tx_vport->nb_data_txq,
+						     false, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+				    i);
+			return err;
+		}
+		cpfl_txq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+						     i - cpfl_rx_vport->nb_data_rxq,
+						     true, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+				    i);
+			/* Fix: propagate the error instead of falling through
+			 * and marking a queue started that failed to enable.
+			 */
+			return err;
+		}
+		cpfl_rxq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Undo cpfl_hairpin_bind(): disable the hairpin Tx queues and completion
+ * queue on this port and the hairpin Rx queues and buffer queue on the
+ * peer port (rx_port), clearing each queue's q_started flag.
+ *
+ * NOTE(review): the return values of all disable calls are ignored and
+ * the function always reports success -- teardown appears to be
+ * best-effort by design; confirm this is intended.
+ */
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ /* disable the Tx completion queue after its data queues */
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ /* disable the peer port's hairpin Rx queues */
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ /* disable the peer port's Rx buffer queue last */
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1144,6 +1279,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
@@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
return err;
}
+/* Enable (on == true) or disable the vport's hairpin Tx completion
+ * queue through a virtchnl message; returns the virtchnl result.
+ */
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+	struct idpf_vport *vport = &cpfl_vport->base;
+	int queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+
+	return idpf_vc_ena_dis_one_queue(vport, queue_id,
+					 VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION, on);
+}
+
+/* Enable (on == true) or disable the vport's hairpin Rx buffer queue
+ * through a virtchnl message; returns the virtchnl result.
+ */
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+	struct idpf_vport *vport = &cpfl_vport->base;
+	int queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+
+	return idpf_vc_ena_dis_one_queue(vport, queue_id,
+					 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, on);
+}
+
int
cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
bool rx, bool on)
@@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */