From patchwork Tue Jun 15 14:15:11 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Jiang, Cheng1" <cheng1.jiang@intel.com>
X-Patchwork-Id: 94252
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Cheng Jiang <cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, Chenbo.Xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,
	Cheng Jiang <cheng1.jiang@intel.com>
Date: Tue, 15 Jun 2021 14:15:11 +0000
Message-Id: <20210615141513.16163-2-cheng1.jiang@intel.com>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20210615141513.16163-1-cheng1.jiang@intel.com>
References: <20210602042802.31943-1-cheng1.jiang@intel.com>
	<20210615141513.16163-1-cheng1.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in
	async vhost

When async vhost is used and VM memory is hot-plugged, applications
need to stop DMA transfers and finish all in-flight packets. This
patch provides an unsafe API to drain the in-flight packets that have
been submitted to the DMA engine in the vhost async data path.

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
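Note (usage sketch, not part of this patch): the new API is meant to
be called from a context in which no other thread is processing the
targeted virtqueue, e.g. a VM memory hot-plug handler after DMA
transfers have been stopped. A minimal caller-side sketch follows;
the handler name, DRAIN_BURST and the application-maintained
n_inflight counter are hypothetical:

	#include <rte_common.h>
	#include <rte_mbuf.h>
	#include <rte_vhost_async.h>

	#define DRAIN_BURST 32	/* hypothetical burst size */

	/* Drain and free every packet still owned by the DMA engine
	 * for one virtqueue. The API performs no locking, so every
	 * other thread touching this queue must already be stopped.
	 */
	static void
	drain_vhost_queue(int vid, uint16_t queue_id, uint32_t n_inflight)
	{
		struct rte_mbuf *pkts[DRAIN_BURST];
		uint16_t n;

		while (n_inflight > 0) {
			/* Never ask for more packets than are in flight:
			 * the call returns only once `count` packets have
			 * completed.
			 */
			n = rte_vhost_drain_queue_thread_unsafe(vid, queue_id,
					pkts,
					RTE_MIN(n_inflight, (uint32_t)DRAIN_BURST));
			rte_pktmbuf_free_bulk(pkts, n);
			n_inflight -= n;
		}
	}
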
 lib/vhost/rte_vhost_async.h | 22 +++++++++
 lib/vhost/version.map       |  3 ++
 lib/vhost/virtio_net.c      | 90 +++++++++++++++++++++++++++----------
 3 files changed, 92 insertions(+), 23 deletions(-)

diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..041f40cf04 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,26 @@ __rte_experimental
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	struct rte_mbuf **pkts, uint16_t count);
 
+/**
+ * This function checks async completion status and empties all packets
+ * for a specific vhost device queue. Packets that are in flight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking.
+ *
+ * @param vid
+ *  id of vhost device to drain data
+ * @param queue_id
+ *  queue id to drain data
+ * @param pkts
+ *  blank array to get return packet pointer
+ * @param count
+ *  size of the packet array
+ * @return
+ *  num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint16_t count);
+
 #endif /* _RTE_VHOST_ASYNC_H_ */

diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..f480f188af 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
 
 	# added in 21.05
 	rte_vhost_get_negotiated_protocol_features;
+
+	# added in 21.08
+	rte_vhost_drain_queue_thread_unsafe;
 };
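Note on the implementation in virtio_net.c below: the new unsafe API
loops until exactly `count` packets have completed, so a caller that
asks for more packets than are actually in flight will spin forever.
A sketch of the completion-side bookkeeping that keeps a counter such
as the hypothetical n_inflight above accurate; the enqueue path is
assumed to increment it by the number of packets that
rte_vhost_submit_enqueue_burst() leaves in flight:

	/* Hypothetical data-path poll: frees completed packets and
	 * decrements the application-side in-flight counter.
	 */
	static void
	poll_completions(int vid, uint16_t queue_id, uint32_t *n_inflight)
	{
		struct rte_mbuf *done_pkts[DRAIN_BURST];
		uint16_t n_done;

		n_done = rte_vhost_poll_enqueue_completed(vid, queue_id,
				done_pkts, DRAIN_BURST);
		rte_pktmbuf_free_bulk(done_pkts, n_done);
		*n_inflight -= n_done;
	}
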
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..793510974a 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
 	} while (nr_left > 0);
 }
 
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mbuf **pkts, uint16_t count)
 {
-	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
 	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
 
-	if (!dev)
-		return 0;
-
-	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, queue_id);
-		return 0;
-	}
-
 	vq = dev->virtqueue[queue_id];
 
-	if (unlikely(!vq->async_registered)) {
-		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
-			dev->vid, __func__, queue_id);
-		return 0;
-	}
-
-	rte_spinlock_lock(&vq->access_lock);
-
 	pkts_idx = vq->async_pkts_idx % vq->size;
 	pkts_info = vq->async_pkts_info;
 	vq_size = vq->size;
@@ -2119,14 +2101,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		vq_size, vq->async_pkts_inflight_n);
 
 	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+		n_pkts_cpl = vq->async_ops.check_completed_copies(dev->vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
 	if (unlikely(n_pkts_put == 0)) {
 		vq->async_last_pkts_n = n_pkts_cpl;
-		goto done;
+		return 0;
 	}
 
 	if (vq_is_packed(dev)) {
@@ -2165,12 +2147,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		vq->last_async_desc_idx_split += n_descs;
 	}
 
-done:
+	return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint16_t count)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq;
+	uint16_t n_pkts_put = 0;
+
+	if (!dev)
+		return 0;
+
+	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(!vq->async_registered)) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	rte_spinlock_lock(&vq->access_lock);
+
+	n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return n_pkts_put;
 }
 
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint16_t count)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq;
+	uint16_t n_pkts = count;
+
+	if (!dev)
+		return 0;
+
+	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(!vq->async_registered)) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	while (count)
+		count -= vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+	return n_pkts;
+}
+
 static __rte_always_inline uint32_t
 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mbuf **pkts, uint32_t count,