[v1,10/21] net/virtio: alloc Rx SW ring only if vectorized path
Checks
Commit Message
This patch only allocates the SW ring when vectorized
datapath is used. It also moves the SW ring and fake mbuf
in the virtnet_rx struct since this is Rx-only.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 88 ++++++++++++-------
drivers/net/virtio/virtio_rxtx.c | 8 +-
drivers/net/virtio/virtio_rxtx.h | 4 +-
drivers/net/virtio/virtio_rxtx_simple.h | 2 +-
.../net/virtio/virtio_rxtx_simple_altivec.c | 4 +-
drivers/net/virtio/virtio_rxtx_simple_neon.c | 4 +-
drivers/net/virtio/virtio_rxtx_simple_sse.c | 4 +-
drivers/net/virtio/virtqueue.c | 6 +-
drivers/net/virtio/virtqueue.h | 1 -
9 files changed, 72 insertions(+), 49 deletions(-)
Comments
Hi Maxime,
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized
> path
>
> This patch only allocates the SW ring when vectorized
> datapath is used. It also moves the SW ring and fake mbuf
> in the virtnet_rx struct since this is Rx-only.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 88 ++++++++++++-------
> drivers/net/virtio/virtio_rxtx.c | 8 +-
> drivers/net/virtio/virtio_rxtx.h | 4 +-
> drivers/net/virtio/virtio_rxtx_simple.h | 2 +-
> .../net/virtio/virtio_rxtx_simple_altivec.c | 4 +-
> drivers/net/virtio/virtio_rxtx_simple_neon.c | 4 +-
> drivers/net/virtio/virtio_rxtx_simple_sse.c | 4 +-
> drivers/net/virtio/virtqueue.c | 6 +-
> drivers/net/virtio/virtqueue.h | 1 -
> 9 files changed, 72 insertions(+), 49 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 8b17b450ec..46dd5606f6 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -339,6 +339,47 @@ virtio_free_queue_headers(struct virtqueue *vq)
> *hdr_mem = 0;
> }
>
> +static int
> +virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
> +{
> + void *sw_ring;
> + struct rte_mbuf *mbuf;
> + size_t size;
> +
> + /* SW ring is only used with vectorized datapath */
> + if (!vq->hw->use_vec_rx)
> + return 0;
> +
> + size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq-
> >rxq.sw_ring[0]);
> +
> + sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE,
> numa_node);
> + if (!sw_ring) {
> + PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
> + return -ENOMEM;
> + }
> +
> + mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf),
> RTE_CACHE_LINE_SIZE, numa_node);
> + if (!mbuf) {
> + PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> + rte_free(sw_ring);
> + return -ENOMEM;
> + }
> +
> + vq->rxq.sw_ring = sw_ring;
> + vq->rxq.fake_mbuf = mbuf;
> +
> + return 0;
> +}
> +
> +static void
> +virtio_rxq_sw_ring_free(struct virtqueue *vq)
> +{
> + rte_free(vq->rxq.fake_mbuf);
> + vq->rxq.fake_mbuf = NULL;
> + rte_free(vq->rxq.sw_ring);
> + vq->rxq.sw_ring = NULL;
> +}
> +
> static int
> virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
> {
> @@ -346,14 +387,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> const struct rte_memzone *mz = NULL;
> unsigned int vq_size, size;
> struct virtio_hw *hw = dev->data->dev_private;
> - struct virtnet_rx *rxvq = NULL;
> struct virtnet_ctl *cvq = NULL;
> struct virtqueue *vq;
> - void *sw_ring = NULL;
> int queue_type = virtio_get_queue_type(hw, queue_idx);
> int ret;
> int numa_node = dev->device->numa_node;
> - struct rte_mbuf *fake_mbuf = NULL;
>
> PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
> queue_idx, numa_node);
> @@ -441,28 +479,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> }
>
> if (queue_type == VTNET_RQ) {
> - size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
> - sizeof(vq->sw_ring[0]);
> -
> - sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
> - RTE_CACHE_LINE_SIZE, numa_node);
> - if (!sw_ring) {
> - PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
> - ret = -ENOMEM;
> + ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
> + if (ret)
> goto free_hdr_mz;
> - }
> -
> - fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
> - RTE_CACHE_LINE_SIZE, numa_node);
> - if (!fake_mbuf) {
> - PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> - ret = -ENOMEM;
> - goto free_sw_ring;
> - }
> -
> - vq->sw_ring = sw_ring;
> - rxvq = &vq->rxq;
> - rxvq->fake_mbuf = fake_mbuf;
> } else if (queue_type == VTNET_TQ) {
> virtqueue_txq_indirect_headers_init(vq);
> } else if (queue_type == VTNET_CQ) {
> @@ -486,9 +505,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>
> clean_vq:
> hw->cvq = NULL;
> - rte_free(fake_mbuf);
> -free_sw_ring:
> - rte_free(sw_ring);
> + if (queue_type == VTNET_RQ)
> + virtio_rxq_sw_ring_free(vq);
> free_hdr_mz:
> virtio_free_queue_headers(vq);
> free_mz:
> @@ -519,7 +537,7 @@ virtio_free_queues(struct virtio_hw *hw)
> queue_type = virtio_get_queue_type(hw, i);
> if (queue_type == VTNET_RQ) {
> rte_free(vq->rxq.fake_mbuf);
> - rte_free(vq->sw_ring);
> + rte_free(vq->rxq.sw_ring);
> }
>
> virtio_free_queue_headers(vq);
> @@ -2195,6 +2213,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>
> rte_spinlock_init(&hw->state_lock);
>
> + if (vectorized) {
> + hw->use_vec_rx = 1;
> + hw->use_vec_tx = 1;
> + }
> +
> /* reset device and negotiate default features */
> ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> if (ret < 0)
> @@ -2202,12 +2225,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>
> if (vectorized) {
> if (!virtio_with_packed_queue(hw)) {
> - hw->use_vec_rx = 1;
> + hw->use_vec_tx = 0;
> } else {
> -#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
> - hw->use_vec_rx = 1;
> - hw->use_vec_tx = 1;
> -#else
> +#if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
> + hw->use_vec_rx = 0;
> + hw->use_vec_tx = 0;
> PMD_DRV_LOG(INFO,
> "building environment do not support packed ring
> vectorized");
> #endif
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index 4f69b97f41..2d0afd3302 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -737,9 +737,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev
> *dev, uint16_t queue_idx)
> virtio_rxq_vec_setup(rxvq);
> }
>
> - memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> - for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
> desc_idx++)
> - vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
> + if (hw->use_vec_rx) {
> + memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> + for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
> desc_idx++)
> + vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq-
> >fake_mbuf;
> + }
>
> if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
> while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index 57af630110..afc4b74534 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -18,8 +18,8 @@ struct virtnet_stats {
> };
>
> struct virtnet_rx {
> - /* dummy mbuf, for wraparound when processing RX ring. */
> - struct rte_mbuf *fake_mbuf;
> + struct rte_mbuf **sw_ring; /**< RX software ring. */
> + struct rte_mbuf *fake_mbuf; /**< dummy mbuf, for wraparound when
> processing RX ring. */
> uint64_t mbuf_initializer; /**< value to init mbufs. */
> struct rte_mempool *mpool; /**< mempool for mbuf allocation */
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h
> b/drivers/net/virtio/virtio_rxtx_simple.h
> index 8e235f4dbc..79196ed86e 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -26,7 +26,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
> struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>
> desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
> - sw_ring = &vq->sw_ring[desc_idx];
> + sw_ring = &vq->rxq.sw_ring[desc_idx];
> start_dp = &vq->vq_split.ring.desc[desc_idx];
>
> ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> index e7f0ed6068..7910efc153 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> @@ -103,8 +103,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
>
> desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
> rused = &vq->vq_split.ring.used->ring[desc_idx];
> - sw_ring = &vq->sw_ring[desc_idx];
> - sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> + sw_ring = &vq->rxq.sw_ring[desc_idx];
After sw_ring, there are two spaces; there should be only one.
> + sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
>
> rte_prefetch0(rused);
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> index 7fd92d1b0c..ffaa139bd6 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> @@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue,
>
> desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
> rused = &vq->vq_split.ring.used->ring[desc_idx];
> - sw_ring = &vq->sw_ring[desc_idx];
> - sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> + sw_ring = &vq->rxq.sw_ring[desc_idx];
Ditto
> + sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
>
> rte_prefetch_non_temporal(rused);
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> index 7577f5e86d..ed608fbf2e 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> @@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
>
> desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
> rused = &vq->vq_split.ring.used->ring[desc_idx];
> - sw_ring = &vq->sw_ring[desc_idx];
> - sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> + sw_ring = &vq->rxq.sw_ring[desc_idx];
Ditto
Thanks,
Chenbo
> + sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
>
> rte_prefetch0(rused);
>
> diff --git a/drivers/net/virtio/virtqueue.c
> b/drivers/net/virtio/virtqueue.c
> index fb651a4ca3..7a84796513 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -38,9 +38,9 @@ virtqueue_detach_unused(struct virtqueue *vq)
> continue;
> if (start > end && (idx >= start || idx < end))
> continue;
> - cookie = vq->sw_ring[idx];
> + cookie = vq->rxq.sw_ring[idx];
> if (cookie != NULL) {
> - vq->sw_ring[idx] = NULL;
> + vq->rxq.sw_ring[idx] = NULL;
> return cookie;
> }
> } else {
> @@ -100,7 +100,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
> uep = &vq->vq_split.ring.used->ring[used_idx];
> if (hw->use_vec_rx) {
> desc_idx = used_idx;
> - rte_pktmbuf_free(vq->sw_ring[desc_idx]);
> + rte_pktmbuf_free(vq->rxq.sw_ring[desc_idx]);
> vq->vq_free_cnt++;
> } else if (hw->use_inorder_rx) {
> desc_idx = (uint16_t)uep->id;
> diff --git a/drivers/net/virtio/virtqueue.h
> b/drivers/net/virtio/virtqueue.h
> index d453c3ec26..d7f8ee79bb 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -206,7 +206,6 @@ struct virtqueue {
> * or virtual address for virtio_user. */
>
> uint16_t *notify_addr;
> - struct rte_mbuf **sw_ring; /**< RX software ring. */
> struct vq_desc_extra vq_descx[];
> };
>
> --
> 2.38.1
On 1/30/23 08:49, Xia, Chenbo wrote:
> Hi Maxime,
>
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Wednesday, November 30, 2022 11:56 PM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
>> david.marchand@redhat.com; eperezma@redhat.com
>> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized
>> path
>>
>> This patch only allocates the SW ring when vectorized
>> datapath is used. It also moves the SW ring and fake mbuf
>> in the virtnet_rx struct since this is Rx-only.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>> drivers/net/virtio/virtio_ethdev.c | 88 ++++++++++++-------
>> drivers/net/virtio/virtio_rxtx.c | 8 +-
>> drivers/net/virtio/virtio_rxtx.h | 4 +-
>> drivers/net/virtio/virtio_rxtx_simple.h | 2 +-
>> .../net/virtio/virtio_rxtx_simple_altivec.c | 4 +-
>> drivers/net/virtio/virtio_rxtx_simple_neon.c | 4 +-
>> drivers/net/virtio/virtio_rxtx_simple_sse.c | 4 +-
>> drivers/net/virtio/virtqueue.c | 6 +-
>> drivers/net/virtio/virtqueue.h | 1 -
>> 9 files changed, 72 insertions(+), 49 deletions(-)
>>
>> --- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
>> +++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
>> @@ -103,8 +103,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
>> **rx_pkts,
>>
>> desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
>> rused = &vq->vq_split.ring.used->ring[desc_idx];
>> - sw_ring = &vq->sw_ring[desc_idx];
>> - sw_ring_end = &vq->sw_ring[vq->vq_nentries];
>> + sw_ring = &vq->rxq.sw_ring[desc_idx];
>
> After sw_ring, there are two spaces; there should be only one.
Right, it was there before, but I fixed it in v2, here and elsewhere.
Thanks,
Maxime
@@ -339,6 +339,47 @@ virtio_free_queue_headers(struct virtqueue *vq)
*hdr_mem = 0;
}
+static int
+virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
+{
+ void *sw_ring;
+ struct rte_mbuf *mbuf;
+ size_t size;
+
+ /* SW ring is only used with vectorized datapath */
+ if (!vq->hw->use_vec_rx)
+ return 0;
+
+ size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
+ if (!sw_ring) {
+ PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+ return -ENOMEM;
+ }
+
+ mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
+ if (!mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ rte_free(sw_ring);
+ return -ENOMEM;
+ }
+
+ vq->rxq.sw_ring = sw_ring;
+ vq->rxq.fake_mbuf = mbuf;
+
+ return 0;
+}
+
+static void
+virtio_rxq_sw_ring_free(struct virtqueue *vq)
+{
+ rte_free(vq->rxq.fake_mbuf);
+ vq->rxq.fake_mbuf = NULL;
+ rte_free(vq->rxq.sw_ring);
+ vq->rxq.sw_ring = NULL;
+}
+
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
@@ -346,14 +387,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
const struct rte_memzone *mz = NULL;
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_rx *rxvq = NULL;
struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
- void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
int numa_node = dev->device->numa_node;
- struct rte_mbuf *fake_mbuf = NULL;
PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
queue_idx, numa_node);
@@ -441,28 +479,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
}
if (queue_type == VTNET_RQ) {
- size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
- sizeof(vq->sw_ring[0]);
-
- sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, numa_node);
- if (!sw_ring) {
- PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
- ret = -ENOMEM;
+ ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
+ if (ret)
goto free_hdr_mz;
- }
-
- fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
- RTE_CACHE_LINE_SIZE, numa_node);
- if (!fake_mbuf) {
- PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
- ret = -ENOMEM;
- goto free_sw_ring;
- }
-
- vq->sw_ring = sw_ring;
- rxvq = &vq->rxq;
- rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
virtqueue_txq_indirect_headers_init(vq);
} else if (queue_type == VTNET_CQ) {
@@ -486,9 +505,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
clean_vq:
hw->cvq = NULL;
- rte_free(fake_mbuf);
-free_sw_ring:
- rte_free(sw_ring);
+ if (queue_type == VTNET_RQ)
+ virtio_rxq_sw_ring_free(vq);
free_hdr_mz:
virtio_free_queue_headers(vq);
free_mz:
@@ -519,7 +537,7 @@ virtio_free_queues(struct virtio_hw *hw)
queue_type = virtio_get_queue_type(hw, i);
if (queue_type == VTNET_RQ) {
rte_free(vq->rxq.fake_mbuf);
- rte_free(vq->sw_ring);
+ rte_free(vq->rxq.sw_ring);
}
virtio_free_queue_headers(vq);
@@ -2195,6 +2213,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
rte_spinlock_init(&hw->state_lock);
+ if (vectorized) {
+ hw->use_vec_rx = 1;
+ hw->use_vec_tx = 1;
+ }
+
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
@@ -2202,12 +2225,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (vectorized) {
if (!virtio_with_packed_queue(hw)) {
- hw->use_vec_rx = 1;
+ hw->use_vec_tx = 0;
} else {
-#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
- hw->use_vec_rx = 1;
- hw->use_vec_tx = 1;
-#else
+#if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
PMD_DRV_LOG(INFO,
"building environment do not support packed ring vectorized");
#endif
@@ -737,9 +737,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_rxq_vec_setup(rxvq);
}
- memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
- for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
- vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
+ if (hw->use_vec_rx) {
+ memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
+ for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
+ vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
+ }
if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
@@ -18,8 +18,8 @@ struct virtnet_stats {
};
struct virtnet_rx {
- /* dummy mbuf, for wraparound when processing RX ring. */
- struct rte_mbuf *fake_mbuf;
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ struct rte_mbuf *fake_mbuf; /**< dummy mbuf, for wraparound when processing RX ring. */
uint64_t mbuf_initializer; /**< value to init mbufs. */
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
@@ -26,7 +26,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
- sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
start_dp = &vq->vq_split.ring.desc[desc_idx];
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
@@ -103,8 +103,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch0(rused);
@@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch_non_temporal(rused);
@@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch0(rused);
@@ -38,9 +38,9 @@ virtqueue_detach_unused(struct virtqueue *vq)
continue;
if (start > end && (idx >= start || idx < end))
continue;
- cookie = vq->sw_ring[idx];
+ cookie = vq->rxq.sw_ring[idx];
if (cookie != NULL) {
- vq->sw_ring[idx] = NULL;
+ vq->rxq.sw_ring[idx] = NULL;
return cookie;
}
} else {
@@ -100,7 +100,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
uep = &vq->vq_split.ring.used->ring[used_idx];
if (hw->use_vec_rx) {
desc_idx = used_idx;
- rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+ rte_pktmbuf_free(vq->rxq.sw_ring[desc_idx]);
vq->vq_free_cnt++;
} else if (hw->use_inorder_rx) {
desc_idx = (uint16_t)uep->id;
@@ -206,7 +206,6 @@ struct virtqueue {
* or virtual address for virtio_user. */
uint16_t *notify_addr;
- struct rte_mbuf **sw_ring; /**< RX software ring. */
struct vq_desc_extra vq_descx[];
};