[3/9] vdpa/ifc: add support to vDPA queue enable
Checks
Commit Message
This patch adds support to enabling and disabling
vrings on a per-vring granularity.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/vdpa/ifc/base/ifcvf.c | 9 +++++++++
drivers/vdpa/ifc/base/ifcvf.h | 4 ++++
drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
3 files changed, 35 insertions(+), 1 deletion(-)
Comments
On 05/14, Maxime Coquelin wrote:
>This patch adds support to enabling and disabling
>vrings on a per-vring granularity.
>
>Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>---
> drivers/vdpa/ifc/base/ifcvf.c | 9 +++++++++
> drivers/vdpa/ifc/base/ifcvf.h | 4 ++++
> drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
> 3 files changed, 35 insertions(+), 1 deletion(-)
>
>diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
>index 3c0b2dff66..dd4e7468ae 100644
>--- a/drivers/vdpa/ifc/base/ifcvf.c
>+++ b/drivers/vdpa/ifc/base/ifcvf.c
>@@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
> return (u8 *)hw->notify_addr[qid] -
> (u8 *)hw->mem_resource[hw->notify_region].addr;
> }
>+
>+void
>+ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable)
>+{
>+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
>+
>+ IFCVF_WRITE_REG16(qid, &cfg->queue_select);
>+ IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
>+}
>diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
>index eb04a94067..bd85010eff 100644
>--- a/drivers/vdpa/ifc/base/ifcvf.h
>+++ b/drivers/vdpa/ifc/base/ifcvf.h
>@@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
> u64
> ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
>
>+void
>+ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable);
>+
>+
> #endif /* _IFCVF_H_ */
>diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
>index ec97178dcb..55ce0cf13d 100644
>--- a/drivers/vdpa/ifc/ifcvf_vdpa.c
>+++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
>@@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
> return 0;
> }
>
>+static int
>+ifcvf_set_vring_state(int vid, int vring, int state)
>+{
>+ int did;
>+ struct internal_list *list;
>+ struct ifcvf_internal *internal;
>+
>+ did = rte_vhost_get_vdpa_device_id(vid);
>+ list = find_internal_resource_by_did(did);
>+ if (list == NULL) {
>+ DRV_LOG(ERR, "Invalid device id: %d", did);
>+ return -1;
>+ }
Do we need the sanity check for the vring as well?
Thanks,
Xiaolong
>+
>+ internal = list->internal;
>+
>+ ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
>+
>+ return 0;
>+}
>+
> static int
> ifcvf_set_features(int vid)
> {
>@@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
> .get_protocol_features = ifcvf_get_protocol_features,
> .dev_conf = ifcvf_dev_config,
> .dev_close = ifcvf_dev_close,
>- .set_vring_state = NULL,
>+ .set_vring_state = ifcvf_set_vring_state,
> .set_features = ifcvf_set_features,
> .migration_done = NULL,
> .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
>--
>2.25.4
>
On 2020/5/14 下午4:02, Maxime Coquelin wrote:
> This patch adds support to enabling and disabling
> vrings on a per-vring granularity.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
A question here, I see in qemu peer_attach() may try to generate
VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
behave as queue_enable defined in virtio specification which is
explicitly under the control of guest?
(Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
from this one).
Thanks
> ---
> drivers/vdpa/ifc/base/ifcvf.c | 9 +++++++++
> drivers/vdpa/ifc/base/ifcvf.h | 4 ++++
> drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
> 3 files changed, 35 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
> index 3c0b2dff66..dd4e7468ae 100644
> --- a/drivers/vdpa/ifc/base/ifcvf.c
> +++ b/drivers/vdpa/ifc/base/ifcvf.c
> @@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
> return (u8 *)hw->notify_addr[qid] -
> (u8 *)hw->mem_resource[hw->notify_region].addr;
> }
> +
> +void
> +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable)
> +{
> + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
> +
> + IFCVF_WRITE_REG16(qid, &cfg->queue_select);
> + IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
> +}
> diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
> index eb04a94067..bd85010eff 100644
> --- a/drivers/vdpa/ifc/base/ifcvf.h
> +++ b/drivers/vdpa/ifc/base/ifcvf.h
> @@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
> u64
> ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
>
> +void
> +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable);
> +
> +
> #endif /* _IFCVF_H_ */
> diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
> index ec97178dcb..55ce0cf13d 100644
> --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
> +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
> @@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
> return 0;
> }
>
> +static int
> +ifcvf_set_vring_state(int vid, int vring, int state)
> +{
> + int did;
> + struct internal_list *list;
> + struct ifcvf_internal *internal;
> +
> + did = rte_vhost_get_vdpa_device_id(vid);
> + list = find_internal_resource_by_did(did);
> + if (list == NULL) {
> + DRV_LOG(ERR, "Invalid device id: %d", did);
> + return -1;
> + }
> +
> + internal = list->internal;
> +
> + ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
> +
> + return 0;
> +}
> +
> static int
> ifcvf_set_features(int vid)
> {
> @@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
> .get_protocol_features = ifcvf_get_protocol_features,
> .dev_conf = ifcvf_dev_config,
> .dev_close = ifcvf_dev_close,
> - .set_vring_state = NULL,
> + .set_vring_state = ifcvf_set_vring_state,
> .set_features = ifcvf_set_features,
> .migration_done = NULL,
> .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
Hi,
Best Regards,
Xiao
> -----Original Message-----
> From: Jason Wang <jasowang@redhat.com>
> Sent: Friday, May 15, 2020 5:09 PM
> To: Maxime Coquelin <maxime.coquelin@redhat.com>; Ye, Xiaolong
> <xiaolong.ye@intel.com>; shahafs@mellanox.com; matan@mellanox.com;
> amorenoz@redhat.com; Wang, Xiao W <xiao.w.wang@intel.com>;
> viacheslavo@mellanox.com; dev@dpdk.org
> Cc: lulu@redhat.com
> Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>
>
> On 2020/5/14 下午4:02, Maxime Coquelin wrote:
> > This patch adds support to enabling and disabling
> > vrings on a per-vring granularity.
> >
> > Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com<mailto:maxime.coquelin@redhat.com>>
>
>
> A question here, I see in qemu peer_attach() may try to generate
> VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
> behave as queue_enable defined in virtio specification which is
> explicitly under the control of guest?
>
> (Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
> from this one).
From my view, common_cfg.enable reg is used for registering a queue to hypervisor&vhost, but not ENABLE.
The control queue message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is for enable/disable queue pairs.
Think about when virtio net probes, all queues are selected and "enabled" by init_vqs(), but MQ is not enabled until virtnet_set_channels() by user config with "ethtool".
Based on this, below reg writing is not OK to enable MQ. IFC HW supports below registers for VF pass-thru case.
Actually, we have specific reg designed to enable MQ in VDPA case.
> > + IFCVF_WRITE_REG16(qid, &cfg->queue_select);
> > + IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
BRs,
Xiao
>
> Thanks
>
>
> > ---
> > drivers/vdpa/ifc/base/ifcvf.c | 9 +++++++++
> > drivers/vdpa/ifc/base/ifcvf.h | 4 ++++
> > drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
> > 3 files changed, 35 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
> > index 3c0b2dff66..dd4e7468ae 100644
> > --- a/drivers/vdpa/ifc/base/ifcvf.c
> > +++ b/drivers/vdpa/ifc/base/ifcvf.c
> > @@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int
> qid)
> > return (u8 *)hw->notify_addr[qid] -
> > (u8 *)hw->mem_resource[hw->notify_region].addr;
> > }
> > +
> > +void
> > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable)
> > +{
> > + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
> > +
> > + IFCVF_WRITE_REG16(qid, &cfg->queue_select);
> > + IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
> > +}
> > diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
> > index eb04a94067..bd85010eff 100644
> > --- a/drivers/vdpa/ifc/base/ifcvf.h
> > +++ b/drivers/vdpa/ifc/base/ifcvf.h
> > @@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
> > u64
> > ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
> >
> > +void
> > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable);
> > +
> > +
> > #endif /* _IFCVF_H_ */
> > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
> > index ec97178dcb..55ce0cf13d 100644
> > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
> > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
> > @@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
> > return 0;
> > }
> >
> > +static int
> > +ifcvf_set_vring_state(int vid, int vring, int state)
> > +{
> > + int did;
> > + struct internal_list *list;
> > + struct ifcvf_internal *internal;
> > +
> > + did = rte_vhost_get_vdpa_device_id(vid);
> > + list = find_internal_resource_by_did(did);
> > + if (list == NULL) {
> > + DRV_LOG(ERR, "Invalid device id: %d", did);
> > + return -1;
> > + }
> > +
> > + internal = list->internal;
> > +
> > + ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
> > +
> > + return 0;
> > +}
> > +
> > static int
> > ifcvf_set_features(int vid)
> > {
> > @@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
> > .get_protocol_features = ifcvf_get_protocol_features,
> > .dev_conf = ifcvf_dev_config,
> > .dev_close = ifcvf_dev_close,
> > - .set_vring_state = NULL,
> > + .set_vring_state = ifcvf_set_vring_state,
> > .set_features = ifcvf_set_features,
> > .migration_done = NULL,
> > .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
On 2020/5/15 下午5:42, Wang, Xiao W wrote:
>
> Hi,
>
> Best Regards,
>
> Xiao
>
> > -----Original Message-----
>
> > From: Jason Wang <jasowang@redhat.com>
>
> > Sent: Friday, May 15, 2020 5:09 PM
>
> > To: Maxime Coquelin <maxime.coquelin@redhat.com>; Ye, Xiaolong
>
> > <xiaolong.ye@intel.com>; shahafs@mellanox.com; matan@mellanox.com;
>
> > amorenoz@redhat.com; Wang, Xiao W <xiao.w.wang@intel.com>;
>
> > viacheslavo@mellanox.com; dev@dpdk.org
>
> > Cc: lulu@redhat.com
>
> > Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>
> >
>
> >
>
> > On 2020/5/14 下午4:02, Maxime Coquelin wrote:
>
> > > This patch adds support to enabling and disabling
>
> > > vrings on a per-vring granularity.
>
> > >
>
> > > Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com
> <mailto:maxime.coquelin@redhat.com>>
>
> >
>
> >
>
> > A question here, I see in qemu peer_attach() may try to generate
>
> > VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
>
> > behave as queue_enable defined in virtio specification which is
>
> > explicitly under the control of guest?
>
> >
>
> > (Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
>
> > from this one).
>
> From my view, common_cfg.enable reg is used for registering a queue to
> hypervisor&vhost, but not ENABLE.
>
Well, what's your definition of "enable" in this context?
Spec said:
queue_enable
The driver uses this to selectively prevent the device from
executing requests from this virtqueue. 1 - enabled; 0 - disabled.
This means, if queue_enable is not set to 1, device can not execute
request for this specific virtqueue.
> The control queue message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is for
> enable/disable queue pairs.
>
But in qemu this is hooked to VHOST_USER_SET_VRING_ENABLE, see
peer_attach(). And this patch hook VHOST_USER_SET_VRING_ENABLE to
queue_enable.
This means IFCVF uses queue_enable instead of control vq or other
register for setting multiqueue stuff? My understanding is that IFCVF
has dedicated register to do this.
Note setting mq is different from queue_enable, changing the number of
queues should let the underlayer NIC to properly configure its
steering/switching/filtering logic to make sure traffic were only sent
to the queues that is set by driver.
So hooking VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET to queue_enable looks wrong.
> Think about when virtio net probes, all queues are selected and
> "enabled" by init_vqs(),
>
I think we're talking about aligning the implementation with spec not
just make it work for some specific drivers. Driver may choose to not
enable a virtqueue by not setting 1 to queue_enable.
Thanks
> but MQ is not enabled until virtnet_set_channels() by user config with
> "ethtool".
>
> Based on this, below reg writing is not OK to enable MQ. IFC HW
> supports below registers for VF pass-thru case.
>
> Actually, we have specific reg designed to enable MQ in VDPA case.
>
> > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
>
> > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
>
> BRs,
>
> Xiao
>
> >
>
> > Thanks
>
> >
>
> >
>
> > > ---
>
> > >drivers/vdpa/ifc/base/ifcvf.c |9 +++++++++
>
> > >drivers/vdpa/ifc/base/ifcvf.h |4 ++++
>
> > >drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
>
> > >3 files changed, 35 insertions(+), 1 deletion(-)
>
> > >
>
> > > diff --git a/drivers/vdpa/ifc/base/ifcvf.c
> b/drivers/vdpa/ifc/base/ifcvf.c
>
> > > index 3c0b2dff66..dd4e7468ae 100644
>
> > > --- a/drivers/vdpa/ifc/base/ifcvf.c
>
> > > +++ b/drivers/vdpa/ifc/base/ifcvf.c
>
> > > @@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw
> *hw, int
>
> > qid)
>
> > >return (u8 *)hw->notify_addr[qid] -
>
> > >(u8 *)hw->mem_resource[hw->notify_region].addr;
>
> > >}
>
> > > +
>
> > > +void
>
> > > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid,u16 enable)
>
> > > +{
>
> > > +struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
>
> > > +
>
> > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
>
> > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
>
> > > +}
>
> > > diff --git a/drivers/vdpa/ifc/base/ifcvf.h
> b/drivers/vdpa/ifc/base/ifcvf.h
>
> > > index eb04a94067..bd85010eff 100644
>
> > > --- a/drivers/vdpa/ifc/base/ifcvf.h
>
> > > +++ b/drivers/vdpa/ifc/base/ifcvf.h
>
> > > @@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
>
> > >u64
>
> > >ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
>
> > >
>
> > > +void
>
> > > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid,u16 enable);
>
> > > +
>
> > > +
>
> > >#endif /* _IFCVF_H_ */
>
> > > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c
> b/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > index ec97178dcb..55ce0cf13d 100644
>
> > > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > @@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
>
> > >return 0;
>
> > >}
>
> > >
>
> > > +static int
>
> > > +ifcvf_set_vring_state(int vid, int vring, int state)
>
> > > +{
>
> > > +int did;
>
> > > +struct internal_list *list;
>
> > > +struct ifcvf_internal *internal;
>
> > > +
>
> > > +did = rte_vhost_get_vdpa_device_id(vid);
>
> > > +list = find_internal_resource_by_did(did);
>
> > > +if (list == NULL) {
>
> > > +DRV_LOG(ERR, "Invalid device id: %d", did);
>
> > > +return -1;
>
> > > +}
>
> > > +
>
> > > +internal = list->internal;
>
> > > +
>
> > > +ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
>
> > > +
>
> > > +return 0;
>
> > > +}
>
> > > +
>
> > >static int
>
> > >ifcvf_set_features(int vid)
>
> > >{
>
> > > @@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
>
> > >.get_protocol_features = ifcvf_get_protocol_features,
>
> > >.dev_conf = ifcvf_dev_config,
>
> > >.dev_close = ifcvf_dev_close,
>
> > > -.set_vring_state = NULL,
>
> > > +.set_vring_state = ifcvf_set_vring_state,
>
> > >.set_features = ifcvf_set_features,
>
> > >.migration_done = NULL,
>
> > >.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
>
On 2020/5/15 下午5:42, Wang, Xiao W wrote:
>
> Hi,
>
> Best Regards,
>
> Xiao
>
> > -----Original Message-----
>
> > From: Jason Wang <jasowang@redhat.com>
>
> > Sent: Friday, May 15, 2020 5:09 PM
>
> > To: Maxime Coquelin <maxime.coquelin@redhat.com>; Ye, Xiaolong
>
> > <xiaolong.ye@intel.com>; shahafs@mellanox.com; matan@mellanox.com;
>
> > amorenoz@redhat.com; Wang, Xiao W <xiao.w.wang@intel.com>;
>
> > viacheslavo@mellanox.com; dev@dpdk.org
>
> > Cc: lulu@redhat.com
>
> > Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>
> >
>
> >
>
> > On 2020/5/14 下午4:02, Maxime Coquelin wrote:
>
> > > This patch adds support to enabling and disabling
>
> > > vrings on a per-vring granularity.
>
> > >
>
> > > Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com
> <mailto:maxime.coquelin@redhat.com>>
>
> >
>
> >
>
> > A question here, I see in qemu peer_attach() may try to generate
>
> > VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
>
> > behave as queue_enable defined in virtio specification which is
>
> > explicitly under the control of guest?
>
> >
>
> > (Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
>
> > from this one).
>
> From my view, common_cfg.enable reg is used for registering a queue to
> hypervisor&vhost, but not ENABLE.
>
Well, what's your definition of "enable" in this context?
Spec said:
queue_enable
The driver uses this to selectively prevent the device from
executing requests from this virtqueue. 1 - enabled; 0 - disabled.
This means, if queue_enable is not set to 1, device can not execute
request for this specific virtqueue.
> The control queue message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is for
> enable/disable queue pairs.
>
But in qemu this is hooked to VHOST_USER_SET_VRING_ENABLE, see
peer_attach(). And this patch hook VHOST_USER_SET_VRING_ENABLE to
queue_enable.
This means IFCVF uses queue_enable instead of control vq or other
register for setting multiqueue stuff? My understanding is that IFCVF
has dedicated register to do this.
Note setting mq is different from queue_enable, changing the number of
queues should let the underlayer NIC to properly configure its
steering/switching/filtering logic to make sure traffic were only sent
to the queues that is set by driver.
So hooking VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET to queue_enable looks wrong.
> Think about when virtio net probes, all queues are selected and
> "enabled" by init_vqs(),
>
I think we're talking about aligning the implementation with spec not
just make it work for some specific drivers. Driver may choose to not
enable a virtqueue by not setting 1 to queue_enable.
Thanks
> but MQ is not enabled until virtnet_set_channels() by user config with
> "ethtool".
>
> Based on this, below reg writing is not OK to enable MQ. IFC HW
> supports below registers for VF pass-thru case.
>
> Actually, we have specific reg designed to enable MQ in VDPA case.
>
> > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
>
> > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
>
> BRs,
>
> Xiao
>
> >
>
> > Thanks
>
> >
>
> >
>
> > > ---
>
> > >drivers/vdpa/ifc/base/ifcvf.c |9 +++++++++
>
> > >drivers/vdpa/ifc/base/ifcvf.h |4 ++++
>
> > >drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
>
> > >3 files changed, 35 insertions(+), 1 deletion(-)
>
> > >
>
> > > diff --git a/drivers/vdpa/ifc/base/ifcvf.c
> b/drivers/vdpa/ifc/base/ifcvf.c
>
> > > index 3c0b2dff66..dd4e7468ae 100644
>
> > > --- a/drivers/vdpa/ifc/base/ifcvf.c
>
> > > +++ b/drivers/vdpa/ifc/base/ifcvf.c
>
> > > @@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw
> *hw, int
>
> > qid)
>
> > >return (u8 *)hw->notify_addr[qid] -
>
> > >(u8 *)hw->mem_resource[hw->notify_region].addr;
>
> > >}
>
> > > +
>
> > > +void
>
> > > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid,u16 enable)
>
> > > +{
>
> > > +struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
>
> > > +
>
> > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
>
> > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
>
> > > +}
>
> > > diff --git a/drivers/vdpa/ifc/base/ifcvf.h
> b/drivers/vdpa/ifc/base/ifcvf.h
>
> > > index eb04a94067..bd85010eff 100644
>
> > > --- a/drivers/vdpa/ifc/base/ifcvf.h
>
> > > +++ b/drivers/vdpa/ifc/base/ifcvf.h
>
> > > @@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
>
> > >u64
>
> > >ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
>
> > >
>
> > > +void
>
> > > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid,u16 enable);
>
> > > +
>
> > > +
>
> > >#endif /* _IFCVF_H_ */
>
> > > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c
> b/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > index ec97178dcb..55ce0cf13d 100644
>
> > > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
>
> > > @@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
>
> > >return 0;
>
> > >}
>
> > >
>
> > > +static int
>
> > > +ifcvf_set_vring_state(int vid, int vring, int state)
>
> > > +{
>
> > > +int did;
>
> > > +struct internal_list *list;
>
> > > +struct ifcvf_internal *internal;
>
> > > +
>
> > > +did = rte_vhost_get_vdpa_device_id(vid);
>
> > > +list = find_internal_resource_by_did(did);
>
> > > +if (list == NULL) {
>
> > > +DRV_LOG(ERR, "Invalid device id: %d", did);
>
> > > +return -1;
>
> > > +}
>
> > > +
>
> > > +internal = list->internal;
>
> > > +
>
> > > +ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
>
> > > +
>
> > > +return 0;
>
> > > +}
>
> > > +
>
> > >static int
>
> > >ifcvf_set_features(int vid)
>
> > >{
>
> > > @@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
>
> > >.get_protocol_features = ifcvf_get_protocol_features,
>
> > >.dev_conf = ifcvf_dev_config,
>
> > >.dev_close = ifcvf_dev_close,
>
> > > -.set_vring_state = NULL,
>
> > > +.set_vring_state = ifcvf_set_vring_state,
>
> > >.set_features = ifcvf_set_features,
>
> > >.migration_done = NULL,
>
> > >.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
>
Hi,
Comments inline.
Best Regards,
Xiao
> -----Original Message-----
> From: Jason Wang <jasowang@redhat.com>
> Sent: Friday, May 15, 2020 6:09 PM
> To: Wang, Xiao W <xiao.w.wang@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Ye, Xiaolong <xiaolong.ye@intel.com>;
> shahafs@mellanox.com; matan@mellanox.com; amorenoz@redhat.com;
> viacheslavo@mellanox.com; dev@dpdk.org
> Cc: lulu@redhat.com; Xu, Rosen <rosen.xu@intel.com>
> Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>
>
> On 2020/5/15 下午5:42, Wang, Xiao W wrote:
> >
> > Hi,
> >
> > Best Regards,
> >
> > Xiao
> >
> > > -----Original Message-----
> >
> > > From: Jason Wang <jasowang@redhat.com>
> >
> > > Sent: Friday, May 15, 2020 5:09 PM
> >
> > > To: Maxime Coquelin <maxime.coquelin@redhat.com>; Ye, Xiaolong
> >
> > > <xiaolong.ye@intel.com>; shahafs@mellanox.com; matan@mellanox.com;
> >
> > > amorenoz@redhat.com; Wang, Xiao W <xiao.w.wang@intel.com>;
> >
> > > viacheslavo@mellanox.com; dev@dpdk.org
> >
> > > Cc: lulu@redhat.com
> >
> > > Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
> >
> > >
> >
> > >
> >
> > > On 2020/5/14 下午4:02, Maxime Coquelin wrote:
> >
> > > > This patch adds support to enabling and disabling
> >
> > > > vrings on a per-vring granularity.
> >
> > > >
> >
> > > > Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com
> > <mailto:maxime.coquelin@redhat.com>>
> >
> > >
> >
> > >
> >
> > > A question here, I see in qemu peer_attach() may try to generate
> >
> > > VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
> >
> > > behave as queue_enable defined in virtio specification which is
> >
> > > explicitly under the control of guest?
> >
> > >
> >
> > > (Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
> >
> > > from this one).
> >
> > From my view, common_cfg.enable reg is used for registering a queue to
> > hypervisor&vhost, but not ENABLE.
> >
>
> Well, what's your definition of "enable" in this context?
"Enable a queue" means traffic can pass through this queue.
>
> Spec said:
>
> queue_enable
> The driver uses this to selectively prevent the device from
> executing requests from this virtqueue. 1 - enabled; 0 - disabled.
>
> This means, if queue_enable is not set to 1, device can not execute
> request for this specific virtqueue.
>
For queue enabling in virtio MQ case, there're 2 steps needed:
1. select a queue and write 1 to common_cfg.enable reg
2. send control vq message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
If no step2, by default there's only 1 queue pair enabled.
>
> > The control queue message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is for
> > enable/disable queue pairs.
> >
>
> But in qemu this is hooked to VHOST_USER_SET_VRING_ENABLE, see
> peer_attach(). And this patch hook VHOST_USER_SET_VRING_ENABLE to
> queue_enable.
>
> This means IFCVF uses queue_enable instead of control vq or other
> register for setting multiqueue stuff? My understanding is that IFCVF
> has dedicated register to do this.
>
> Note setting mq is different from queue_enable, changing the number of
> queues should let the underlayer NIC to properly configure its
> steering/switching/filtering logic to make sure traffic were only sent
> to the queues that is set by driver.
>
> So hooking VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET to queue_enable looks
> wrong.
We are on the same page. As I said we have dedicated reg designed to enable MQ (ensure traffic only sent to queues enabled by user) in VDPA case.
>
>
> > Think about when virtio net probes, all queues are selected and
> > "enabled" by init_vqs(),
> >
>
> I think we're talking about aligning the implementation with spec not
> just make it work for some specific drivers. Driver may choose to not
> enable a virtqueue by not setting 1 to queue_enable.
>
> Thanks
>
>
> > but MQ is not enabled until virtnet_set_channels() by user config with
> > "ethtool".
> >
> > Based on this, below reg writing is not OK to enable MQ. IFC HW
> > supports below registers for VF pass-thru case.
> >
> > Actually, we have specific reg designed to enable MQ in VDPA case.
> >
> > > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
> >
> > > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
> >
> > BRs,
> >
> > Xiao
> >
> > >
> >
> > > Thanks
> >
> > >
> >
> > >
> >
> > > > ---
> >
> > > >drivers/vdpa/ifc/base/ifcvf.c |9 +++++++++
> >
> > > >drivers/vdpa/ifc/base/ifcvf.h |4 ++++
> >
> > > >drivers/vdpa/ifc/ifcvf_vdpa.c | 23 ++++++++++++++++++++++-
> >
> > > >3 files changed, 35 insertions(+), 1 deletion(-)
> >
> > > >
> >
> > > > diff --git a/drivers/vdpa/ifc/base/ifcvf.c
> > b/drivers/vdpa/ifc/base/ifcvf.c
> >
> > > > index 3c0b2dff66..dd4e7468ae 100644
> >
> > > > --- a/drivers/vdpa/ifc/base/ifcvf.c
> >
> > > > +++ b/drivers/vdpa/ifc/base/ifcvf.c
> >
> > > > @@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw
> > *hw, int
> >
> > > qid)
> >
> > > >return (u8 *)hw->notify_addr[qid] -
> >
> > > >(u8 *)hw->mem_resource[hw->notify_region].addr;
> >
> > > >}
> >
> > > > +
> >
> > > > +void
> >
> > > > +ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid,u16 enable)
> >
> > > > +{
> >
> > > > +struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
> >
> > > > +
> >
> > > > +IFCVF_WRITE_REG16(qid, &cfg->queue_select);
> >
> > > > +IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
> >
> > > > +}
> >
> > > > diff --git a/drivers/vdpa/ifc/base/ifcvf.h
> > b/drivers/vdpa/ifc/base/ifcvf.h
On 2020/5/18 上午11:09, Wang, Xiao W wrote:
> Hi,
>
> Comments inline.
>
> Best Regards,
> Xiao
>
>> -----Original Message-----
>> From: Jason Wang<jasowang@redhat.com>
>> Sent: Friday, May 15, 2020 6:09 PM
>> To: Wang, Xiao W<xiao.w.wang@intel.com>; Maxime Coquelin
>> <maxime.coquelin@redhat.com>; Ye, Xiaolong<xiaolong.ye@intel.com>;
>> shahafs@mellanox.com;matan@mellanox.com;amorenoz@redhat.com;
>> viacheslavo@mellanox.com;dev@dpdk.org
>> Cc:lulu@redhat.com; Xu, Rosen<rosen.xu@intel.com>
>> Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>>
>>
>> On 2020/5/15 下午5:42, Wang, Xiao W wrote:
>>> Hi,
>>>
>>> Best Regards,
>>>
>>> Xiao
>>>
>>>> -----Original Message-----
>>>> From: Jason Wang<jasowang@redhat.com>
>>>> Sent: Friday, May 15, 2020 5:09 PM
>>>> To: Maxime Coquelin<maxime.coquelin@redhat.com>; Ye, Xiaolong
>>>> <xiaolong.ye@intel.com>;shahafs@mellanox.com;matan@mellanox.com;
>>>> amorenoz@redhat.com; Wang, Xiao W<xiao.w.wang@intel.com>;
>>>> viacheslavo@mellanox.com;dev@dpdk.org
>>>> Cc:lulu@redhat.com
>>>> Subject: Re: [PATCH 3/9] vdpa/ifc: add support to vDPA queue enable
>>>> On 2020/5/14 下午4:02, Maxime Coquelin wrote:
>>>>> This patch adds support to enabling and disabling
>>>>> vrings on a per-vring granularity.
>>>>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com
>>> <mailto:maxime.coquelin@redhat.com>>
>>>
>>>> A question here, I see in qemu peer_attach() may try to generate
>>>> VHOST_USER_SET_VRING_ENABLE, but just from the name I think it should
>>>> behave as queue_enable defined in virtio specification which is
>>>> explicitly under the control of guest?
>>>> (Note, in Cindy's vDPA series, we must invent new vhost_ops to differ
>>>> from this one).
>>> From my view, common_cfg.enable reg is used for registering a queue to
>>> hypervisor&vhost, but not ENABLE.
>>>
>> Well, what's your definition of "enable" in this context?
> "Enable a queue" means traffic can pass through this queue.
>
>> Spec said:
>>
>> queue_enable
>> The driver uses this to selectively prevent the device from
>> executing requests from this virtqueue. 1 - enabled; 0 - disabled.
>>
>> This means, if queue_enable is not set to 1, device can not execute
>> request for this specific virtqueue.
>>
> For queue enabling in virtio MQ case, there're 2 steps needed:
> 1. select a queue and write 1 to common_cfg.enable reg
Note that:
1) queue_enable doesn't mean you can disable a queue by writing zero to
that (which is not allowed by the spec)
2) queue_enable is not specific to MQ, you need write 1 to all the
queues that will be used by this driver
3) it's not allowed to write 1 to queue_enable after DRIVER_OK
> 2. send control vq message VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
> If no step2, by default there's only 1 queue pair enabled.
Yes, and if you read the git history. This command is invented by me :)
Thanks
>
@@ -327,3 +327,12 @@ ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
return (u8 *)hw->notify_addr[qid] -
(u8 *)hw->mem_resource[hw->notify_region].addr;
}
+
+void
+ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable)
+{
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG16(qid, &cfg->queue_select);
+ IFCVF_WRITE_REG16(enable, &cfg->queue_enable);
+}
@@ -159,4 +159,8 @@ ifcvf_get_notify_region(struct ifcvf_hw *hw);
u64
ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
+void
+ifcvf_queue_enable(struct ifcvf_hw *hw, u16 qid, u16 enable);
+
+
#endif /* _IFCVF_H_ */
@@ -937,6 +937,27 @@ ifcvf_dev_close(int vid)
return 0;
}
+static int
+ifcvf_set_vring_state(int vid, int vring, int state)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+
+ ifcvf_queue_enable(&internal->hw, (uint16_t)vring, (uint16_t) state);
+
+ return 0;
+}
+
static int
ifcvf_set_features(int vid)
{
@@ -1086,7 +1107,7 @@ static struct rte_vdpa_dev_ops ifcvf_ops = {
.get_protocol_features = ifcvf_get_protocol_features,
.dev_conf = ifcvf_dev_config,
.dev_close = ifcvf_dev_close,
- .set_vring_state = NULL,
+ .set_vring_state = ifcvf_set_vring_state,
.set_features = ifcvf_set_features,
.migration_done = NULL,
.get_vfio_group_fd = ifcvf_get_vfio_group_fd,