From patchwork Mon Oct 23 09:55:18 2023
X-Patchwork-Submitter: David Marchand
X-Patchwork-Id: 133170
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: David Marchand
To: dev@dpdk.org
Cc: Maxime Coquelin, Chenbo Xia
Subject: [PATCH 1/3] vhost: robustify virtqueue access lock asserts
Date: Mon, 23 Oct 2023 11:55:18 +0200
Message-ID: <20231023095520.2864868-1-david.marchand@redhat.com>

A simple comment in vhost_user_msg_handler() is not that robust.

Add a lock_all_qps property to message handlers so that their
implementation can add a build check and assert a vq is locked.
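For illustration, a hypothetical handler (not part of the patch) showing how the new macro is meant to be consumed: the request id passed to VHOST_USER_ASSERT_LOCK() must have lock_all_qps set to true in vhost_message_handlers[], otherwise RTE_BUILD_BUG_ON() breaks the build.

static int
vhost_user_handle_example(struct virtio_net **pdev,
		struct vhu_msg_context *ctx,
		int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];

	/* Compile-time check that this request locks all qps, plus a
	 * runtime assert that this vq's access_lock is really held.
	 */
	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);

	/* ... it is now safe to touch vq state ... */
	return RTE_VHOST_MSG_RESULT_OK;
}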
Signed-off-by: David Marchand
Acked-by: Eelco Chaudron
Reviewed-by: Maxime Coquelin
---
 lib/vhost/vhost_user.c | 110 +++++++++++++++++++----------------------
 1 file changed, 51 insertions(+), 59 deletions(-)

diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 901a80bbaa..5bbdbd54d8 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -56,14 +56,24 @@
 #define INFLIGHT_ALIGNMENT	64
 #define INFLIGHT_VERSION	0x1
 
-typedef struct vhost_message_handler {
+typedef const struct vhost_message_handler {
 	const char *description;
 	int (*callback)(struct virtio_net **pdev, struct vhu_msg_context *ctx,
 		int main_fd);
 	bool accepts_fd;
+	bool lock_all_qps;
 } vhost_message_handler_t;
 static vhost_message_handler_t vhost_message_handlers[];
 
+/* vhost_user_msg_handler() locks all qps based on a handler's lock_all_qps.
+ * Later, a handler may need to ensure the vq has been locked (for example,
+ * when calling lock annotated helpers).
+ */
+#define VHOST_USER_ASSERT_LOCK(dev, vq, id) do { \
+	RTE_BUILD_BUG_ON(!vhost_message_handlers[id].lock_all_qps); \
+	vq_assert_lock(dev, vq); \
+} while (0)
+
 static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx);
 
@@ -400,7 +410,7 @@ vhost_user_set_features(struct virtio_net **pdev,
 			cleanup_vq(vq, 1);
 			cleanup_vq_inflight(dev, vq);
 			/* vhost_user_lock_all_queue_pairs locked all qps */
-			vq_assert_lock(dev, vq);
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_FEATURES);
 			rte_rwlock_write_unlock(&vq->access_lock);
 			free_vq(dev, vq);
 		}
@@ -2224,7 +2234,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	vq = dev->virtqueue[index];
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		/* vhost_user_lock_all_queue_pairs locked all qps */
-		vq_assert_lock(dev, vq);
+		VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ENABLE);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
 			VHOST_LOG_CONFIG(dev->ifname, ERR,
 				"failed to enable vring. Inflight packets must be completed first\n");
@@ -2829,41 +2839,43 @@ vhost_user_set_status(struct virtio_net **pdev,
 }
 
 #define VHOST_MESSAGE_HANDLERS \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false) \
-VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false)
-
-#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd) \
-	[id] = { #id, handler, accepts_fd },
+VHOST_MESSAGE_HANDLER(VHOST_USER_NONE, NULL, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_FEATURES, vhost_user_get_features, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ADDR, vhost_user_set_vring_addr, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_BASE, vhost_user_set_vring_base, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_VRING_BASE, vhost_user_get_vring_base, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_KICK, vhost_user_set_vring_kick, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_CALL, vhost_user_set_vring_call, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ERR, vhost_user_set_vring_err, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_PROTOCOL_FEATURES, vhost_user_get_protocol_features, \
+	false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_PROTOCOL_FEATURES, vhost_user_set_protocol_features, \
+	false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_ADVISE, vhost_user_set_postcopy_advise, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_LISTEN, vhost_user_set_postcopy_listen, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_POSTCOPY_END, vhost_user_postcopy_end, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_INFLIGHT_FD, vhost_user_get_inflight_fd, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_INFLIGHT_FD, vhost_user_set_inflight_fd, true, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_SET_STATUS, vhost_user_set_status, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_STATUS, vhost_user_get_status, false, false)
+
+#define VHOST_MESSAGE_HANDLER(id, handler, accepts_fd, lock_all_qps) \
+	[id] = { #id, handler, accepts_fd, lock_all_qps },
 
 static vhost_message_handler_t vhost_message_handlers[] = {
 	VHOST_MESSAGE_HANDLERS
 };
@@ -3131,31 +3143,11 @@ vhost_user_msg_handler(int vid, int fd)
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (request) {
-	case VHOST_USER_SET_FEATURES:
-	case VHOST_USER_SET_PROTOCOL_FEATURES:
-	case VHOST_USER_SET_OWNER:
-	case VHOST_USER_SET_MEM_TABLE:
-	case VHOST_USER_SET_LOG_BASE:
-	case VHOST_USER_SET_LOG_FD:
-	case VHOST_USER_SET_VRING_NUM:
-	case VHOST_USER_SET_VRING_ADDR:
-	case VHOST_USER_SET_VRING_BASE:
-	case VHOST_USER_SET_VRING_KICK:
-	case VHOST_USER_SET_VRING_CALL:
-	case VHOST_USER_SET_VRING_ERR:
-	case VHOST_USER_SET_VRING_ENABLE:
-	case VHOST_USER_SEND_RARP:
-	case VHOST_USER_NET_SET_MTU:
-	case VHOST_USER_SET_BACKEND_REQ_FD:
+	if (msg_handler->lock_all_qps) {
 		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 			vhost_user_lock_all_queue_pairs(dev);
 			unlock_required = 1;
 		}
-		break;
-	default:
-		break;
-	}
+	}
 
 	handled = false;

From patchwork Mon Oct 23 09:55:19 2023
X-Patchwork-Submitter: David Marchand
X-Patchwork-Id: 133172
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: David Marchand
To: dev@dpdk.org
Cc: stable@dpdk.org, Maxime Coquelin, Chenbo Xia, Eelco Chaudron
Subject: [PATCH 2/3] vhost: fix virtqueue access lock in datapath
Date: Mon, 23 Oct 2023 11:55:19 +0200
Message-ID: <20231023095520.2864868-2-david.marchand@redhat.com>
In-Reply-To: <20231023095520.2864868-1-david.marchand@redhat.com>
References: <20231023095520.2864868-1-david.marchand@redhat.com>
Now that a r/w lock is used, the access_ok field should only be updated
under a write lock.

Since the datapath code only takes a read lock on the virtqueue to check
access_ok, this lock must be released and a write lock taken before
calling vring_translate().
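A condensed sketch of the pattern applied in the diff below (the function name is made up and the normal enqueue path is elided): the datapath drops its read locks first, then virtio_dev_vring_translate() re-takes the virtqueue lock in write mode, so access_ok is only ever written under the write lock.

static uint32_t
enqueue_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct rte_mbuf **pkts __rte_unused, uint32_t count __rte_unused)
{
	uint32_t nb_tx = 0;

	rte_rwlock_read_lock(&vq->access_lock);
	vhost_user_iotlb_rd_lock(vq);

	if (unlikely(!vq->access_ok)) {
		/* Drop the read locks first... */
		vhost_user_iotlb_rd_unlock(vq);
		rte_rwlock_read_unlock(&vq->access_lock);
		/* ...then retranslate under the write lock and bail out;
		 * the caller simply retries on its next burst.
		 */
		virtio_dev_vring_translate(dev, vq);
		return 0;
	}

	/* ... normal enqueue path, still under the read lock ... */

	vhost_user_iotlb_rd_unlock(vq);
	rte_rwlock_read_unlock(&vq->access_lock);

	return nb_tx;
}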
Fixes: 03f77d66d966 ("vhost: change virtqueue access lock to a read/write one")
Cc: stable@dpdk.org

Signed-off-by: David Marchand
Acked-by: Eelco Chaudron
Reviewed-by: Maxime Coquelin
---
 lib/vhost/virtio_net.c | 60 +++++++++++++++++++++++++++++++-----------
 1 file changed, 44 insertions(+), 16 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 759a78e3e3..4116f79d4f 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1694,6 +1694,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static void
+virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	rte_rwlock_write_lock(&vq->access_lock);
+	vhost_user_iotlb_rd_lock(vq);
+	if (!vq->access_ok)
+		vring_translate(dev, vq);
+	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
+}
+
 static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
@@ -1708,9 +1719,13 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -1729,6 +1744,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -2523,9 +2539,13 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
 	if (count == 0)
@@ -2546,6 +2566,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 out_access_unlock:
 	rte_rwlock_write_unlock(&vq->access_lock);
 
+out_no_unlock:
 	return nb_tx;
 }
 
@@ -3576,11 +3597,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(!vq->access_ok))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(!vq->access_ok)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -3641,6 +3664,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }
 
@@ -4190,11 +4214,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 
 	vhost_user_iotlb_rd_lock(vq);
 
-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0)) {
-			count = 0;
-			goto out;
-		}
+	if (unlikely(vq->access_ok == 0)) {
+		vhost_user_iotlb_rd_unlock(vq);
+		rte_rwlock_read_unlock(&vq->access_lock);
+
+		virtio_dev_vring_translate(dev, vq);
+		count = 0;
+		goto out_no_unlock;
+	}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -4260,5 +4287,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rarp_mbuf != NULL))
 		count += 1;
 
+out_no_unlock:
 	return count;
 }

From patchwork Mon Oct 23 09:55:20 2023
X-Patchwork-Submitter: David Marchand
X-Patchwork-Id: 133171
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: David Marchand
To: dev@dpdk.org
Cc: Maxime Coquelin, Chenbo Xia
Subject: [PATCH 3/3] vhost: annotate virtqueue access checks
Date: Mon, 23 Oct 2023 11:55:20 +0200
Message-ID: <20231023095520.2864868-3-david.marchand@redhat.com>
In-Reply-To: <20231023095520.2864868-1-david.marchand@redhat.com>
References: <20231023095520.2864868-1-david.marchand@redhat.com>

Modifying vq->access_ok should be done with a write lock taken.
Annotate vring_translate() and vring_invalidate() and add missing locks.
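A minimal illustration (the helper name is made up) of what the annotations buy: when building with clang's thread safety analysis, a caller of vring_invalidate() that does not hold the virtqueue lock in write mode is flagged at compile time, because the function is now annotated with __rte_exclusive_locks_required(&vq->access_lock).

static void
reset_vring_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	rte_rwlock_write_lock(&vq->access_lock);
	vring_invalidate(dev, vq);	/* ok: access_lock held in write mode */
	rte_rwlock_write_unlock(&vq->access_lock);

	/* Calling vring_invalidate(dev, vq) here, outside of the lock,
	 * would be reported by the analysis.
	 */
}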
Signed-off-by: David Marchand
Acked-by: Eelco Chaudron
---
 lib/vhost/vduse.c      |  4 ++++
 lib/vhost/vhost.h      |  7 +++++--
 lib/vhost/vhost_user.c | 10 ++++++++++
 3 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 080b58f7de..e198eeef64 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -196,6 +196,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 			vq->size * sizeof(struct batch_copy_elem),
 			RTE_CACHE_LINE_SIZE, 0);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vhost_user_iotlb_rd_lock(vq);
 	if (vring_translate(dev, vq))
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
@@ -206,6 +207,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 			"Failed to disable guest notifications on vring %d\n",
 			index);
 	vhost_user_iotlb_rd_unlock(vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	vq_efd.index = index;
 	vq_efd.fd = vq->kickfd;
@@ -259,7 +261,9 @@ vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
 	close(vq->kickfd);
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 5fc9035a1f..70d18bdfbf 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -295,7 +295,8 @@ struct vhost_virtqueue {
 #define VIRTIO_UNINITIALIZED_EVENTFD	(-2)
 
 	bool		enabled;
-	bool		access_ok;
+	/* Protected by vq->access_lock */
+	bool		access_ok __rte_guarded_var;
 	bool		ready;
 
 	rte_rwlock_t	access_lock;
@@ -874,11 +875,13 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 		uint64_t desc_addr, uint64_t desc_len)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock)
 	__rte_shared_locks_required(&vq->iotlb_lock);
 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint64_t log_addr)
 	__rte_shared_locks_required(&vq->iotlb_lock);
-void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+	__rte_exclusive_locks_required(&vq->access_lock);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 5bbdbd54d8..cbe2222ef3 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -797,6 +797,8 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	dev = *pdev;
 	vq = *pvq;
 
+	vq_assert_lock(dev, vq);
+
 	if (vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG)) {
 		vq->log_guest_addr =
 			log_addr_to_gpa(dev, vq);
@@ -934,6 +936,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev,
 
 	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
 	vq = dev->virtqueue[ctx->msg.payload.addr.index];
 
+	/* vhost_user_lock_all_queue_pairs locked all qps */
+	VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_VRING_ADDR);
+
 	access_ok = vq->access_ok;
 
 	/*
@@ -1446,6 +1451,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
 			continue;
 
 		if (vq->desc || vq->avail || vq->used) {
+			/* vhost_user_lock_all_queue_pairs locked all qps */
+			VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_SET_MEM_TABLE);
+
 			/*
 			 * If the memory table got updated, the ring addresses
 			 * need to be translated again as virtual addresses have
@@ -2208,7 +2216,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vhost_user_iotlb_flush_all(dev);
 
+	rte_rwlock_write_lock(&vq->access_lock);
 	vring_invalidate(dev, vq);
+	rte_rwlock_write_unlock(&vq->access_lock);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }