[v1,09/21] net/virtio: refactor indirect desc headers init
Checks
Commit Message
This patch refactors the indirect descriptor headers
initialization into a dedicated function, which is used
by both the queue init and reset functions.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 30 +------------
drivers/net/virtio/virtqueue.c | 68 ++++++++++++++++++++++--------
drivers/net/virtio/virtqueue.h | 2 +
3 files changed, 54 insertions(+), 46 deletions(-)
Comments
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 09/21] net/virtio: refactor indirect desc headers init
>
> This patch refactors the indirect descriptors headers
> initialization in a dedicated function, and makes it used
> by both queue init and reset functions.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 30 +------------
> drivers/net/virtio/virtqueue.c | 68 ++++++++++++++++++++++--------
> drivers/net/virtio/virtqueue.h | 2 +
> 3 files changed, 54 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index b546916a9f..8b17b450ec 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -347,7 +347,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> unsigned int vq_size, size;
> struct virtio_hw *hw = dev->data->dev_private;
> struct virtnet_rx *rxvq = NULL;
> - struct virtnet_tx *txvq = NULL;
> struct virtnet_ctl *cvq = NULL;
> struct virtqueue *vq;
> void *sw_ring = NULL;
> @@ -465,7 +464,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> rxvq = &vq->rxq;
> rxvq->fake_mbuf = fake_mbuf;
> } else if (queue_type == VTNET_TQ) {
> - txvq = &vq->txq;
> + virtqueue_txq_indirect_headers_init(vq);
> } else if (queue_type == VTNET_CQ) {
> cvq = &vq->cq;
> hw->cvq = cvq;
> @@ -477,33 +476,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> else
> vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
>
> - if (queue_type == VTNET_TQ) {
> - struct virtio_tx_region *txr;
> - unsigned int i;
> -
> - txr = txvq->hdr_mz->addr;
> - for (i = 0; i < vq_size; i++) {
> - /* first indirect descriptor is always the tx header */
> - if (!virtio_with_packed_queue(hw)) {
> - struct vring_desc *start_dp = txr[i].tx_indir;
> - vring_desc_init_split(start_dp,
> - RTE_DIM(txr[i].tx_indir));
> - start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
> - + offsetof(struct virtio_tx_region, tx_hdr);
> - start_dp->len = hw->vtnet_hdr_size;
> - start_dp->flags = VRING_DESC_F_NEXT;
> - } else {
> - struct vring_packed_desc *start_dp =
> - txr[i].tx_packed_indir;
> - vring_desc_init_indirect_packed(start_dp,
> - RTE_DIM(txr[i].tx_packed_indir));
> - start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
> - + offsetof(struct virtio_tx_region, tx_hdr);
> - start_dp->len = hw->vtnet_hdr_size;
> - }
> - }
> - }
> -
> if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
> PMD_INIT_LOG(ERR, "setup_queue failed");
> ret = -EINVAL;
> diff --git a/drivers/net/virtio/virtqueue.c
> b/drivers/net/virtio/virtqueue.c
> index 41e3529546..fb651a4ca3 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -143,6 +143,54 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> virtqueue_rxvq_flush_split(vq);
> }
>
> +static void
> +virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t
> idx)
> +{
> + struct virtio_tx_region *txr;
> + struct vring_packed_desc *desc;
> + rte_iova_t hdr_mem;
> +
> + txr = vq->txq.hdr_mz->addr;
> + hdr_mem = vq->txq.hdr_mem;
> + desc = txr[idx].tx_packed_indir;
> +
> + vring_desc_init_indirect_packed(desc,
> RTE_DIM(txr[idx].tx_packed_indir));
> + desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct
> virtio_tx_region, tx_hdr);
> + desc->len = vq->hw->vtnet_hdr_size;
> +}
> +
> +static void
> +virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t
> idx)
> +{
> + struct virtio_tx_region *txr;
> + struct vring_desc *desc;
> + rte_iova_t hdr_mem;
> +
> + txr = vq->txq.hdr_mz->addr;
> + hdr_mem = vq->txq.hdr_mem;
> + desc = txr[idx].tx_indir;
> +
> + vring_desc_init_split(desc, RTE_DIM(txr[idx].tx_indir));
> + desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct
> virtio_tx_region, tx_hdr);
> + desc->len = vq->hw->vtnet_hdr_size;
> + desc->flags = VRING_DESC_F_NEXT;
> +}
> +
> +void
> +virtqueue_txq_indirect_headers_init(struct virtqueue *vq)
> +{
> + uint32_t i;
> +
> + if (!virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
> + return;
> +
> + for (i = 0; i < vq->vq_nentries; i++)
> + if (virtio_with_packed_queue(vq->hw))
> + virtqueue_txq_indirect_header_init_packed(vq, i);
> + else
> + virtqueue_txq_indirect_header_init_split(vq, i);
> +}
> +
> int
> virtqueue_rxvq_reset_packed(struct virtqueue *vq)
> {
> @@ -182,10 +230,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
> {
> int size = vq->vq_nentries;
> struct vq_desc_extra *dxp;
> - struct virtnet_tx *txvq;
> uint16_t desc_idx;
> - struct virtio_tx_region *txr;
> - struct vring_packed_desc *start_dp;
>
> vq->vq_used_cons_idx = 0;
> vq->vq_desc_head_idx = 0;
> @@ -197,10 +242,8 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
> vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> vq->vq_packed.event_flags_shadow = 0;
>
> - txvq = &vq->txq;
> - txr = txvq->hdr_mz->addr;
> memset(vq->mz->addr, 0, vq->mz->len);
> - memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
> + memset(vq->txq.hdr_mz->addr, 0, vq->txq.hdr_mz->len);
>
> for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> dxp = &vq->vq_descx[desc_idx];
> @@ -208,20 +251,11 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
> rte_pktmbuf_free(dxp->cookie);
> dxp->cookie = NULL;
> }
> -
> - if (virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
> {
> - /* first indirect descriptor is always the tx header */
> - start_dp = txr[desc_idx].tx_packed_indir;
> - vring_desc_init_indirect_packed(start_dp,
> -
> RTE_DIM(txr[desc_idx].tx_packed_indir));
> - start_dp->addr = txvq->hdr_mem + desc_idx * sizeof(*txr)
> - + offsetof(struct virtio_tx_region, tx_hdr);
> - start_dp->len = vq->hw->vtnet_hdr_size;
> - }
> }
>
> + virtqueue_txq_indirect_headers_init(vq);
> vring_desc_init_packed(vq, size);
> -
> virtqueue_disable_intr(vq);
> +
> return 0;
> }
> diff --git a/drivers/net/virtio/virtqueue.h
> b/drivers/net/virtio/virtqueue.h
> index 8b7bfae643..d453c3ec26 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -384,6 +384,8 @@ int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
>
> int virtqueue_txvq_reset_packed(struct virtqueue *vq);
>
> +void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
> +
> static inline int
> virtqueue_full(const struct virtqueue *vq)
> {
> --
> 2.38.1
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
@@ -347,7 +347,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = NULL;
- struct virtnet_tx *txvq = NULL;
struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
void *sw_ring = NULL;
@@ -465,7 +464,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
rxvq = &vq->rxq;
rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
- txvq = &vq->txq;
+ virtqueue_txq_indirect_headers_init(vq);
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
hw->cvq = cvq;
@@ -477,33 +476,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
else
vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
- if (queue_type == VTNET_TQ) {
- struct virtio_tx_region *txr;
- unsigned int i;
-
- txr = txvq->hdr_mz->addr;
- for (i = 0; i < vq_size; i++) {
- /* first indirect descriptor is always the tx header */
- if (!virtio_with_packed_queue(hw)) {
- struct vring_desc *start_dp = txr[i].tx_indir;
- vring_desc_init_split(start_dp,
- RTE_DIM(txr[i].tx_indir));
- start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
- start_dp->len = hw->vtnet_hdr_size;
- start_dp->flags = VRING_DESC_F_NEXT;
- } else {
- struct vring_packed_desc *start_dp =
- txr[i].tx_packed_indir;
- vring_desc_init_indirect_packed(start_dp,
- RTE_DIM(txr[i].tx_packed_indir));
- start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
- start_dp->len = hw->vtnet_hdr_size;
- }
- }
- }
-
if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
ret = -EINVAL;
@@ -143,6 +143,54 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
virtqueue_rxvq_flush_split(vq);
}
+static void
+virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t idx)
+{
+ struct virtio_tx_region *txr;
+ struct vring_packed_desc *desc;
+ rte_iova_t hdr_mem;
+
+ txr = vq->txq.hdr_mz->addr;
+ hdr_mem = vq->txq.hdr_mem;
+ desc = txr[idx].tx_packed_indir;
+
+ vring_desc_init_indirect_packed(desc, RTE_DIM(txr[idx].tx_packed_indir));
+ desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
+ desc->len = vq->hw->vtnet_hdr_size;
+}
+
+static void
+virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t idx)
+{
+ struct virtio_tx_region *txr;
+ struct vring_desc *desc;
+ rte_iova_t hdr_mem;
+
+ txr = vq->txq.hdr_mz->addr;
+ hdr_mem = vq->txq.hdr_mem;
+ desc = txr[idx].tx_indir;
+
+ vring_desc_init_split(desc, RTE_DIM(txr[idx].tx_indir));
+ desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
+ desc->len = vq->hw->vtnet_hdr_size;
+ desc->flags = VRING_DESC_F_NEXT;
+}
+
+void
+virtqueue_txq_indirect_headers_init(struct virtqueue *vq)
+{
+ uint32_t i;
+
+ if (!virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
+ return;
+
+ for (i = 0; i < vq->vq_nentries; i++)
+ if (virtio_with_packed_queue(vq->hw))
+ virtqueue_txq_indirect_header_init_packed(vq, i);
+ else
+ virtqueue_txq_indirect_header_init_split(vq, i);
+}
+
int
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
@@ -182,10 +230,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
{
int size = vq->vq_nentries;
struct vq_desc_extra *dxp;
- struct virtnet_tx *txvq;
uint16_t desc_idx;
- struct virtio_tx_region *txr;
- struct vring_packed_desc *start_dp;
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
@@ -197,10 +242,8 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
vq->vq_packed.event_flags_shadow = 0;
- txvq = &vq->txq;
- txr = txvq->hdr_mz->addr;
memset(vq->mz->addr, 0, vq->mz->len);
- memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
+ memset(vq->txq.hdr_mz->addr, 0, vq->txq.hdr_mz->len);
for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
dxp = &vq->vq_descx[desc_idx];
@@ -208,20 +251,11 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
-
- if (virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC)) {
- /* first indirect descriptor is always the tx header */
- start_dp = txr[desc_idx].tx_packed_indir;
- vring_desc_init_indirect_packed(start_dp,
- RTE_DIM(txr[desc_idx].tx_packed_indir));
- start_dp->addr = txvq->hdr_mem + desc_idx * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
- start_dp->len = vq->hw->vtnet_hdr_size;
- }
}
+ virtqueue_txq_indirect_headers_init(vq);
vring_desc_init_packed(vq, size);
-
virtqueue_disable_intr(vq);
+
return 0;
}
@@ -384,6 +384,8 @@ int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{