[v3,1/3] net/virtio_user: convert cq descriptor IOVA address to Virtual address
Commit Message
This patch modifies the code to convert descriptor buffer IOVA
addresses to virtual addresses during the processing of the shadow
control queue when IOVA mode is PA. This change enables virtio-user
to operate with IOVA as the descriptor buffer address.
Signed-off-by: Srujana Challa <schalla@marvell.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 33 ++++++++++++-------
1 file changed, 21 insertions(+), 12 deletions(-)
Comments
On Wed, Jul 3, 2024 at 3:43 PM Srujana Challa <schalla@marvell.com> wrote:
>
> This patch modifies the code to convert descriptor buffer IOVA
> addresses to virtual addresses during the processing of shadow
> control queue when IOVA mode is PA. This change enables Virtio-user
> to operate with IOVA as the descriptor buffer address.
>
> Signed-off-by: Srujana Challa <schalla@marvell.com>
> ---
> .../net/virtio/virtio_user/virtio_user_dev.c | 33 ++++++++++++-------
> 1 file changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> index 1365c8a5c8..7f35f4b06b 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> @@ -896,6 +896,15 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
>
> #define CVQ_MAX_DATA_DESCS 32
>
> +static inline void *
> +virtio_user_iova2virt(rte_iova_t iova)
> +{
> + if (rte_eal_iova_mode() == RTE_IOVA_PA)
There is RTE_IOVA_DC as well. So we may put positive logic, i.e.
rte_eal_iova_mode() == RTE_IOVA_VA.
> + return rte_mem_iova2virt(iova);
> + else
> + return (void *)(uintptr_t)iova;
> +}
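For reference, a minimal sketch of the positive-logic variant suggested here (an editorial illustration; by the time drivers run, EAL has resolved the mode to either VA or PA, so the two branches cover all cases):

static inline void *
virtio_user_iova2virt(rte_iova_t iova)
{
	/* In VA mode the descriptor address already is a usable
	 * process virtual address, no memseg lookup needed. */
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (void *)(uintptr_t)iova;

	return rte_mem_iova2virt(iova);
}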
> +
> static uint32_t
> virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
> uint16_t idx_hdr)
> @@ -921,17 +930,18 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
> idx_status = i;
> n_descs++;
>
> - hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
> + hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
> if (hdr->class == VIRTIO_NET_CTRL_MQ &&
> hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> - uint16_t queues;
> + uint16_t queues, *addr;
>
> - queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
> + addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
> + queues = *addr;
> status = virtio_user_handle_mq(dev, queues);
> } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> struct virtio_net_ctrl_rss *rss;
>
> - rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
> + rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
> status = virtio_user_handle_mq(dev, rss->max_tx_vq);
> } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
> hdr->class == VIRTIO_NET_CTRL_MAC ||
> @@ -944,7 +954,7 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
> (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
>
> /* Update status */
> - *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
> + *(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
>
> return n_descs;
> }
> @@ -986,18 +996,18 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
> n_descs++;
> }
>
> - hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
> + hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
> if (hdr->class == VIRTIO_NET_CTRL_MQ &&
> hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> - uint16_t queues;
> + uint16_t queues, *addr;
>
> - queues = *(uint16_t *)(uintptr_t)
> - vring->desc[idx_data].addr;
> + addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
> + queues = *addr;
> status = virtio_user_handle_mq(dev, queues);
> } else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
> struct virtio_net_ctrl_rss *rss;
>
> - rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
> + rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
> status = virtio_user_handle_mq(dev, rss->max_tx_vq);
> } else if (hdr->class == VIRTIO_NET_CTRL_RX ||
> hdr->class == VIRTIO_NET_CTRL_MAC ||
> @@ -1010,8 +1020,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
> (struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
>
> /* Update status */
> - *(virtio_net_ctrl_ack *)(uintptr_t)
> - vring->desc[idx_status].addr = status;
> + *(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
>
> /* Update used descriptor */
> vring->desc[idx_hdr].id = vring->desc[idx_status].id;
> --
> 2.25.1
>
On 7/3/24 12:19, Jerin Jacob wrote:
> On Wed, Jul 3, 2024 at 3:43 PM Srujana Challa <schalla@marvell.com> wrote:
>>
>> This patch modifies the code to convert descriptor buffer IOVA
>> addresses to virtual addresses during the processing of shadow
>> control queue when IOVA mode is PA. This change enables Virtio-user
>> to operate with IOVA as the descriptor buffer address.
>>
>> Signed-off-by: Srujana Challa <schalla@marvell.com>
>> [...]
>>
>> +static inline void *
>> +virtio_user_iova2virt(rte_iova_t iova)
>> +{
>> + if (rte_eal_iova_mode() == RTE_IOVA_PA)
>
> There is RTE_IOVA_DC as well. So we may put positive logic. i.e
> rte_eal_iova_mode() == RTE_IOVA_VA
Indeed, that would be better.
I can fix it while applying.
Thanks,
Maxime
On 7/3/24 12:03, Srujana Challa wrote:
> This patch modifies the code to convert descriptor buffer IOVA
> addresses to virtual addresses during the processing of shadow
> control queue when IOVA mode is PA. This change enables Virtio-user
> to operate with IOVA as the descriptor buffer address.
>
> Signed-off-by: Srujana Challa <schalla@marvell.com>
> [...]
> +static inline void *
> +virtio_user_iova2virt(rte_iova_t iova)
> +{
> + if (rte_eal_iova_mode() == RTE_IOVA_PA)
> + return rte_mem_iova2virt(iova);
> + else
> + return (void *)(uintptr_t)iova;
> +}
> +
Thanks for the last-minute change.
With Jerin suggestion:
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Maxime
Hello Srujana, Jerin,
On Wed, Jul 3, 2024 at 12:04 PM Srujana Challa <schalla@marvell.com> wrote:
>
> This patch modifies the code to convert descriptor buffer IOVA
> addresses to virtual addresses during the processing of shadow
> control queue when IOVA mode is PA. This change enables Virtio-user
> to operate with IOVA as the descriptor buffer address.
>
> Signed-off-by: Srujana Challa <schalla@marvell.com>
This patch triggers a segfault in testpmd when using virtio-user
(server) + vhost-user (client).
This was caught in OVS unit tests running in GHA virtual machines.
In such an env with no iommu, IOVA is forced to PA.
This can be reproduced with two testpmd instances in an env without
iommu, like a VM, or alternatively by forcing --iova-mode=pa on the cmdline:
$ rm -f vhost-user; gdb ./build/app/dpdk-testpmd -ex 'run -l 0-2
--in-memory --socket-mem=512 --single-file-segments --no-pci
--file-prefix virtio
--vdev=net_virtio_user,path=vhost-user,queues=2,server=1 -- -i'
...
EAL: Selected IOVA mode 'PA'
...
vhost_user_start_server(): (vhost-user) waiting for client connection...
$ ./build/app/dpdk-testpmd -l 0,3-4 --in-memory --socket-mem=512
--single-file-segments --no-pci --file-prefix vhost-user --vdev
net_vhost,iface=vhost-user,client=1 -- -i
...
EAL: Selected IOVA mode 'PA'
...
VHOST_CONFIG: (vhost-user) virtio is now ready for processing.
On the virtio-user side:
Thread 1 "dpdk-testpmd" received signal SIGSEGV, Segmentation fault.
0x0000000002f956ab in virtio_user_handle_ctrl_msg_split
(dev=0x11a01a8d00, vring=0x11a01a8aa0, idx_hdr=0) at
../drivers/net/virtio/virtio_user/virtio_user_dev.c:942
942 if (hdr->class == VIRTIO_NET_CTRL_MQ &&
(gdb) bt
#0 0x0000000002f956ab in virtio_user_handle_ctrl_msg_split
(dev=0x11a01a8d00, vring=0x11a01a8aa0, idx_hdr=0) at
../drivers/net/virtio/virtio_user/virtio_user_dev.c:942
#1 0x0000000002f95d06 in virtio_user_handle_cq_split
(dev=0x11a01a8d00, queue_idx=4) at
../drivers/net/virtio/virtio_user/virtio_user_dev.c:1087
#2 0x0000000002f95dba in virtio_user_handle_cq (dev=0x11a01a8d00,
queue_idx=4) at
../drivers/net/virtio/virtio_user/virtio_user_dev.c:1104
#3 0x0000000002f79af7 in virtio_user_notify_queue (hw=0x11a01a8d00,
vq=0x11a0181e00) at ../drivers/net/virtio/virtio_user_ethdev.c:278
#4 0x0000000002f45408 in virtqueue_notify (vq=0x11a0181e00) at
../drivers/net/virtio/virtqueue.h:525
#5 0x0000000002f45bf0 in virtio_control_queue_notify
(vq=0x11a0181e00, cookie=0x0) at
../drivers/net/virtio/virtio_ethdev.c:227
#6 0x0000000002f404a5 in virtio_send_command_split (cvq=0x11a0181e60,
ctrl=0x7fffffffc850, dlen=0x7fffffffc84c, pkt_num=1) at
../drivers/net/virtio/virtio_cvq.c:158
#7 0x0000000002f407a7 in virtio_send_command (cvq=0x11a0181e60,
ctrl=0x7fffffffc850, dlen=0x7fffffffc84c, pkt_num=1) at
../drivers/net/virtio/virtio_cvq.c:224
#8 0x0000000002f45af7 in virtio_set_multiple_queues_auto
(dev=0x4624b80 <rte_eth_devices>, nb_queues=1) at
../drivers/net/virtio/virtio_ethdev.c:192
#9 0x0000000002f45b99 in virtio_set_multiple_queues (dev=0x4624b80
<rte_eth_devices>, nb_queues=1) at
../drivers/net/virtio/virtio_ethdev.c:210
#10 0x0000000002f4ad2d in virtio_dev_start (dev=0x4624b80
<rte_eth_devices>) at ../drivers/net/virtio/virtio_ethdev.c:2385
#11 0x0000000000aa4336 in rte_eth_dev_start (port_id=0) at
../lib/ethdev/rte_ethdev.c:1752
#12 0x00000000005984f7 in eth_dev_start_mp (port_id=0) at
../app/test-pmd/testpmd.c:642
#13 0x000000000059ddb7 in start_port (pid=65535) at
../app/test-pmd/testpmd.c:3269
#14 0x00000000005a0eea in main (argc=2, argv=0x7fffffffdfe0) at
../app/test-pmd/testpmd.c:4644
(gdb) l
937 /* locate desc for status */
938 idx_status = i;
939 n_descs++;
940
941 hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
942 if (hdr->class == VIRTIO_NET_CTRL_MQ &&
943 hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
944 uint16_t queues, *addr;
945
946 addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
(gdb) p hdr
$1 = (struct virtio_net_ctrl_hdr *) 0x0
We need someone from Marvell to fix this issue.
The next option is to revert the whole series (which was discussed and
agreed before Maxime went off, in the event this series would trigger
any issue).
> Hello Srujana, Jerin,
>
> On Wed, Jul 3, 2024 at 12:04 PM Srujana Challa <schalla@marvell.com>
> wrote:
> > [patch description elided]
>
> [segfault report and backtrace quoted in full above]
>
> We need someone from Marvell to fix this issue.
> The next option is to revert the whole series (which was discussed and agreed
> before Maxime went off, in the event this series would trigger any issue).
Sure, we are looking into the issue.
Thanks.
>
> --
> David Marchand
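For reference while the investigation is ongoing, an editorial sketch of one plausible direction, not the applied fix: the vhost-user backend shares memory by virtual address, so the shadow control queue descriptors may already hold process VAs even when EAL runs in PA mode; rte_mem_iova2virt() then finds no matching memseg and returns NULL, which matches the NULL hdr above. If that diagnosis holds, the conversion could be gated on the device's addressing mode (the dev->hw.use_va field is assumed here):

static inline void *
virtio_user_iova2virt(struct virtio_user_dev *dev, rte_iova_t iova)
{
	/* Hypothetical sketch: skip the IOVA lookup when the backend
	 * already hands the driver virtual addresses. */
	if (rte_eal_iova_mode() == RTE_IOVA_VA || dev->hw.use_va)
		return (void *)(uintptr_t)iova;

	return rte_mem_iova2virt(iova);
}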
Hello,
On Wed, Jul 3, 2024 at 12:19 PM Jerin Jacob <jerinjacobk@gmail.com> wrote:
>
> On Wed, Jul 3, 2024 at 3:43 PM Srujana Challa <schalla@marvell.com> wrote:
> > [commit message and diff header elided]
> > +static inline void *
> > +virtio_user_iova2virt(rte_iova_t iova)
> > +{
> > + if (rte_eal_iova_mode() == RTE_IOVA_PA)
>
> There is RTE_IOVA_DC as well. So we may put positive logic. i.e
> rte_eal_iova_mode() == RTE_IOVA_VA
Buses can provide RTE_IOVA_VA, RTE_IOVA_PA or RTE_IOVA_DC hints to EAL.
https://doc.dpdk.org/guides/prog_guide/env_abstraction_layer.html#iova-mode-detection
But at the point drivers are probed (and as a consequence when
handling descriptors here), the iova mode is fixed to be either
RTE_IOVA_PA or RTE_IOVA_VA.
https://git.dpdk.org/dpdk/tree/lib/eal/linux/eal.c#n1077
https://git.dpdk.org/dpdk/tree/lib/eal/linux/eal.c#n1124
https://git.dpdk.org/dpdk/tree/lib/eal/linux/eal.c#n1288
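A minimal illustrative sketch of that guarantee (editorial addition, assuming rte_eal_init() has already completed):

#include <assert.h>
#include <rte_eal.h>

/* RTE_IOVA_DC is only an input hint from buses: once EAL
 * initialization completes, the mode is either PA or VA. */
static void
check_resolved_iova_mode(void)
{
	enum rte_iova_mode mode = rte_eal_iova_mode();
	assert(mode == RTE_IOVA_PA || mode == RTE_IOVA_VA);
}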