[v11,05/18] net/idpf: add support for device start and stop
Commit Message
Add dev ops dev_start, dev_stop and link_update.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 89 ++++++++++++++++++++++++++++++++++
drivers/net/idpf/idpf_ethdev.h | 5 ++
2 files changed, 94 insertions(+)
Comments
On 10/24/22 16:12, Junfeng Guo wrote:
> Add dev ops dev_start, dev_stop and link_update.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> ---
> drivers/net/idpf/idpf_ethdev.c | 89 ++++++++++++++++++++++++++++++++++
> drivers/net/idpf/idpf_ethdev.h | 5 ++
> 2 files changed, 94 insertions(+)
>
> diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
> index 1d2075f466..4c7a2d0748 100644
> --- a/drivers/net/idpf/idpf_ethdev.c
> +++ b/drivers/net/idpf/idpf_ethdev.c
> @@ -29,17 +29,42 @@ static const char * const idpf_valid_args[] = {
> };
>
> static int idpf_dev_configure(struct rte_eth_dev *dev);
> +static int idpf_dev_start(struct rte_eth_dev *dev);
> +static int idpf_dev_stop(struct rte_eth_dev *dev);
> static int idpf_dev_close(struct rte_eth_dev *dev);
> static int idpf_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info);
> static void idpf_adapter_rel(struct idpf_adapter *adapter);
>
> +int
> +idpf_dev_link_update(struct rte_eth_dev *dev,
> + __rte_unused int wait_to_complete)
Why is it global? IMHO it should be static now and should be
made global later if you really need it.
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct rte_eth_link new_link;
> +
> + memset(&new_link, 0, sizeof(new_link));
> +
> + new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
> +
> + new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> + new_link.link_status = vport->link_up ? RTE_ETH_LINK_UP :
> + RTE_ETH_LINK_DOWN;
> + new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
> + RTE_ETH_LINK_SPEED_FIXED);
> +
> + return rte_eth_linkstatus_set(dev, &new_link);
> +}
> +
> static const struct eth_dev_ops idpf_eth_dev_ops = {
> .dev_configure = idpf_dev_configure,
> + .dev_start = idpf_dev_start,
> + .dev_stop = idpf_dev_stop,
> .dev_close = idpf_dev_close,
> .rx_queue_setup = idpf_rx_queue_setup,
> .tx_queue_setup = idpf_tx_queue_setup,
> .dev_infos_get = idpf_dev_info_get,
> + .link_update = idpf_dev_link_update,
> };
>
> static int
> @@ -233,6 +258,70 @@ idpf_dev_configure(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static int
> +idpf_start_queues(struct rte_eth_dev *dev)
> +{
> + struct idpf_rx_queue *rxq;
> + struct idpf_tx_queue *txq;
> + int err = 0;
> + int i;
> +
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + txq = dev->data->tx_queues[i];
> + if (txq == NULL || txq->tx_deferred_start)
> + continue;
> +
> + PMD_DRV_LOG(ERR, "Start Tx queues not supported yet");
> + return -ENOTSUP;
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + rxq = dev->data->rx_queues[i];
> + if (rxq == NULL || rxq->rx_deferred_start)
> + continue;
> +
> + PMD_DRV_LOG(ERR, "Start Rx queues not supported yet");
> + return -ENOTSUP;
> + }
> +
> + return err;
> +}
> +
> +static int
> +idpf_dev_start(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> +
> + if (dev->data->mtu > vport->max_mtu) {
> + PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
> + return -1;
> + }
> +
> + vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
> +
> + if (idpf_start_queues(dev) != 0) {
> + PMD_DRV_LOG(ERR, "Failed to start queues");
> + return -1;
> + }
> +
> + if (idpf_vc_ena_dis_vport(vport, true) != 0) {
> + PMD_DRV_LOG(ERR, "Failed to enable vport");
Don't you need to stop queues here?
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> +static int
> +idpf_dev_stop(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
Stop queues?
> +
> + idpf_vc_ena_dis_vport(vport, false);
> +
> + return 0;
> +}
> +
> static int
> idpf_dev_close(struct rte_eth_dev *dev)
> {
> diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
> index c0ae801fd5..070531cc48 100644
> --- a/drivers/net/idpf/idpf_ethdev.h
> +++ b/drivers/net/idpf/idpf_ethdev.h
> @@ -105,6 +105,9 @@ struct idpf_vport {
> /* Chunk info */
> struct idpf_chunks_info chunks_info;
>
> + /* Event from ipf */
> + bool link_up;
> +
It is dead code, since it is read but never written anywhere in the
patch.
> uint16_t devarg_id;
> };
>
> @@ -195,6 +198,8 @@ atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
> }
>
> struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
> +int idpf_dev_link_update(struct rte_eth_dev *dev,
> + __rte_unused int wait_to_complete);
> void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
> int idpf_vc_check_api_version(struct idpf_adapter *adapter);
> int idpf_vc_get_caps(struct idpf_adapter *adapter);
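[Editorial note] The two inline questions above ("Don't you need to stop queues here?" and "Stop queues?") point at the same gap: dev_start has no rollback path if enabling the vport fails, and dev_stop never stops the queues it started. Below is a minimal sketch of the shape being asked for; it assumes a hypothetical idpf_stop_queues() helper, which does not exist in this patch and could only be added once the queue start/stop virtchnl ops are implemented.

/* Sketch only: idpf_stop_queues() is a hypothetical helper, not part of this patch. */
static int
idpf_dev_start(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;

	/* MTU check and max_pkt_len setup as in the patch */

	if (idpf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "Failed to start queues");
		return -1;
	}

	if (idpf_vc_ena_dis_vport(vport, true) != 0) {
		PMD_DRV_LOG(ERR, "Failed to enable vport");
		idpf_stop_queues(dev); /* roll back the queues started above */
		return -1;
	}

	return 0;
}

static int
idpf_dev_stop(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;

	idpf_vc_ena_dis_vport(vport, false);
	idpf_stop_queues(dev); /* stop whatever dev_start started */

	return 0;
}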
> -----Original Message-----
> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Sent: Tuesday, October 25, 2022 5:50 PM
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Li, Xiaoyun <xiaoyun.li@intel.com>
> Subject: Re: [PATCH v11 05/18] net/idpf: add support for device start and
> stop
>
> On 10/24/22 16:12, Junfeng Guo wrote:
> > Add dev ops dev_start, dev_stop and link_update.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> > ---
> > drivers/net/idpf/idpf_ethdev.c | 89
> ++++++++++++++++++++++++++++++++++
> > drivers/net/idpf/idpf_ethdev.h | 5 ++
> > 2 files changed, 94 insertions(+)
> >
> > diff --git a/drivers/net/idpf/idpf_ethdev.c
> > b/drivers/net/idpf/idpf_ethdev.c index 1d2075f466..4c7a2d0748 100644
> > --- a/drivers/net/idpf/idpf_ethdev.c
> > +++ b/drivers/net/idpf/idpf_ethdev.c
> > @@ -29,17 +29,42 @@ static const char * const idpf_valid_args[] = {
> > };
> > +static int
> > +idpf_start_queues(struct rte_eth_dev *dev) {
> > + struct idpf_rx_queue *rxq;
> > + struct idpf_tx_queue *txq;
> > + int err = 0;
> > + int i;
> > +
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + txq = dev->data->tx_queues[i];
> > + if (txq == NULL || txq->tx_deferred_start)
> > + continue;
> > +
> > + PMD_DRV_LOG(ERR, "Start Tx queues not supported yet");
> > + return -ENOTSUP;
> > + }
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + rxq = dev->data->rx_queues[i];
> > + if (rxq == NULL || rxq->rx_deferred_start)
> > + continue;
> > +
> > + PMD_DRV_LOG(ERR, "Start Rx queues not supported yet");
> > + return -ENOTSUP;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int
> > +idpf_dev_start(struct rte_eth_dev *dev) {
> > + struct idpf_vport *vport = dev->data->dev_private;
> > +
> > + if (dev->data->mtu > vport->max_mtu) {
> > + PMD_DRV_LOG(ERR, "MTU should be less than %d", vport-
> >max_mtu);
> > + return -1;
> > + }
> > +
> > + vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
> > +
> > + if (idpf_start_queues(dev) != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to start queues");
> > + return -1;
> > + }
> > +
> > + if (idpf_vc_ena_dis_vport(vport, true) != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to enable vport");
>
> Don't you need to stop queues here?
In this patch we didn't implement starting the HW queues, so I will remove the start queues call here
and add start_queues/stop_queues once those APIs are finished.
>
> > + return -1;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +idpf_dev_stop(struct rte_eth_dev *dev) {
> > + struct idpf_vport *vport = dev->data->dev_private;
>
> Stop queues?
Same.
>
> > +
> > + idpf_vc_ena_dis_vport(vport, false);
> > +
> > + return 0;
> > +}
> > +
> > static int
> > idpf_dev_close(struct rte_eth_dev *dev)
> > {
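[Editorial note] The "dead code" remark about vport->link_up presumably expects the field to be written from the virtchnl event path in a later patch (the struct comment says "Event from ipf", and idpf_handle_virtchnl_msg is already declared in the header). A hedged sketch of that wiring is below; idpf_handle_link_event() is an illustrative placeholder, not a function in this series.

/* Sketch only: idpf_handle_link_event() is a placeholder for the future event path. */
static void
idpf_handle_link_event(struct rte_eth_dev *dev, bool link_up)
{
	struct idpf_vport *vport = dev->data->dev_private;

	vport->link_up = link_up;      /* written here, read in idpf_dev_link_update() */
	idpf_dev_link_update(dev, 0);  /* refresh dev->data->dev_link */
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}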
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 1d2075f466..4c7a2d0748 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,17 +29,42 @@ static const char * const idpf_valid_args[] = {
};
static int idpf_dev_configure(struct rte_eth_dev *dev);
+static int idpf_dev_start(struct rte_eth_dev *dev);
+static int idpf_dev_stop(struct rte_eth_dev *dev);
static int idpf_dev_close(struct rte_eth_dev *dev);
static int idpf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static void idpf_adapter_rel(struct idpf_adapter *adapter);
+int
+idpf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct rte_eth_link new_link;
+
+ memset(&new_link, 0, sizeof(new_link));
+
+ new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vport->link_up ? RTE_ETH_LINK_UP :
+ RTE_ETH_LINK_DOWN;
+ new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ RTE_ETH_LINK_SPEED_FIXED);
+
+ return rte_eth_linkstatus_set(dev, &new_link);
+}
+
static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_configure = idpf_dev_configure,
+ .dev_start = idpf_dev_start,
+ .dev_stop = idpf_dev_stop,
.dev_close = idpf_dev_close,
.rx_queue_setup = idpf_rx_queue_setup,
.tx_queue_setup = idpf_tx_queue_setup,
.dev_infos_get = idpf_dev_info_get,
+ .link_update = idpf_dev_link_update,
};
static int
@@ -233,6 +258,70 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
}
+static int
+idpf_start_queues(struct rte_eth_dev *dev)
+{
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ int err = 0;
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL || txq->tx_deferred_start)
+ continue;
+
+ PMD_DRV_LOG(ERR, "Start Tx queues not supported yet");
+ return -ENOTSUP;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL || rxq->rx_deferred_start)
+ continue;
+
+ PMD_DRV_LOG(ERR, "Start Rx queues not supported yet");
+ return -ENOTSUP;
+ }
+
+ return err;
+}
+
+static int
+idpf_dev_start(struct rte_eth_dev *dev)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+
+ if (dev->data->mtu > vport->max_mtu) {
+ PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+ return -1;
+ }
+
+ vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
+
+ if (idpf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "Failed to start queues");
+ return -1;
+ }
+
+ if (idpf_vc_ena_dis_vport(vport, true) != 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable vport");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+idpf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+
+ idpf_vc_ena_dis_vport(vport, false);
+
+ return 0;
+}
+
static int
idpf_dev_close(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index c0ae801fd5..070531cc48 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -105,6 +105,9 @@ struct idpf_vport {
/* Chunk info */
struct idpf_chunks_info chunks_info;
+ /* Event from ipf */
+ bool link_up;
+
uint16_t devarg_id;
};
@@ -195,6 +198,8 @@ atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
}
struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
+int idpf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int idpf_vc_check_api_version(struct idpf_adapter *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);