[v4,09/13] net/cpfl: support hairpin queue start/stop
Commit Message
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 41 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 151 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 14 +++
3 files changed, 188 insertions(+), 18 deletions(-)
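For context: hairpin queues are created through the generic ethdev hairpin API, and in the default (non-manual) bind mode it is rte_eth_dev_start()/rte_eth_dev_stop() that reaches the start/stop paths added by this patch. Below is a minimal single-port sketch from the application side; it is illustrative only and not part of this patch, and the queue indexes, ring size, and prior rte_eth_dev_configure() setup are assumptions.

#include <rte_ethdev.h>

/* Wire Rx hairpin queue rxq to Tx hairpin queue txq on the same port.
 * Assumes the port is already configured with its data queues plus one
 * extra Rx and one extra Tx queue slot for the hairpin pair.
 */
static int
setup_one_hairpin_pair(uint16_t port, uint16_t rxq, uint16_t txq)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		/* manual_bind stays 0, so rte_eth_dev_start() enables the
		 * hairpin queues, which is the path cpfl_start_queues()
		 * implements in this patch.
		 */
	};
	int ret;

	conf.peers[0].port = port;
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(port, rxq, 1024, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = rxq;
	ret = rte_eth_tx_hairpin_queue_setup(port, txq, 1024, &conf);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port);
}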
Comments
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 26, 2023 3:39 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports Rx/Tx hairpin queue start/stop.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> [...]
> @@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
> +	/* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
> +	 * then enable Tx completion queue and Rx buffer queue.
> +	 */
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {

[Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++), because when i < cpfl_vport->nb_data_txq, (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_txq) would be < 0.

> [...]
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {

[Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++), because when i < cpfl_vport->nb_data_rxq, (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_rxq) would be < 0.

> [...]
> --
> 2.26.2
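The change Mingxia is proposing would look roughly like this for the Tx loop, as it would appear inside cpfl_start_queues() (a sketch against the v4 code above, not a tested revision; the Rx loop change is symmetric with nb_data_rxq):

/* Suggested bound: data queues occupy indexes [0, nb_data_txq), so only
 * the tail of the array can hold hairpin queues and the logical index
 * (i - cpfl_vport->nb_data_txq) can never go negative.
 */
for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
	cpfl_txq = dev->data->tx_queues[i];
	if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
						     i - cpfl_vport->nb_data_txq,
						     false, true);
		if (err)
			PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on", i);
		else
			cpfl_txq->base.q_started = true;
	}
}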
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Tuesday, May 30, 2023 11:31 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
>
> [...]
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> [Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++), because when i < cpfl_vport->nb_data_txq, (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_txq) would be < 0.
> [...]
> > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> [Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++), because when i < cpfl_vport->nb_data_rxq, (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_rxq) would be < 0.

Make sense.

> > [...]
> > --
> > 2.26.2
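The full v4 diff follows. One piece it leans on is the logical-to-hardware queue ID translation in cpfl_switch_hairpin_rxtx_queue(); the helper is introduced earlier in this series, and its assumed shape, shown here only as an illustrative sketch rather than the authoritative definition, is a simple base-plus-offset lookup over the contiguous ID range reserved for p2p queues:

/* Assumed shape of the helper (defined earlier in the series): hairpin
 * queues own a contiguous hardware queue ID range per direction, so a
 * logical index is just an offset from the range start.
 */
static inline uint32_t
cpfl_hw_qid_get(uint32_t start_qid, uint16_t offset)
{
	return start_qid + offset;
}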
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -991,6 +991,81 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1044,22 +1119,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1166,7 +1250,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1180,10 +1269,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1202,7 +1298,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1215,10 +1316,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1238,10 +1346,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
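A note on the CPFL_HAIRPIN_Q_TAIL_AUX_VALUE arithmetic above: the initial hairpin tail is nb_rx_desc minus 8, which satisfies the multiple-of-8 rule only when the ring size is itself a multiple of 8, something the hairpin queue setup is assumed to guarantee. A standalone illustration (the 512-entry ring is a hypothetical value, not from the patch):

#include <assert.h>
#include <stdint.h>

#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8

int
main(void)
{
	uint16_t nb_rx_desc = 512; /* hypothetical ring size */
	/* Mirrors the tail init in cpfl_alloc_split_p2p_rxq_mbufs(). */
	uint16_t rx_tail = nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;

	assert(rx_tail % 8 == 0); /* holds because 512 is a multiple of 8 */
	return 0;
}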