From patchwork Tue Oct 26 16:28:59 2021
X-Patchwork-Submitter: Maxime Coquelin <maxime.coquelin@redhat.com>
X-Patchwork-Id: 102958
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com,
	yuanx.wang@intel.com, wenwux.ma@intel.com,
	bruce.richardson@intel.com, john.mcnamara@intel.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Tue, 26 Oct 2021 18:28:59 +0200
Message-Id: <20211026162904.482987-11-maxime.coquelin@redhat.com>
In-Reply-To: <20211026162904.482987-1-maxime.coquelin@redhat.com>
References: <20211026162904.482987-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v2 10/15] vhost: simplify async enqueue completion

vhost_poll_enqueue_completed() assumes some inflight packets could have
been completed in a previous call but not returned to the application.
But this is not the case, since the check_completed_copies callback is
never called with more than the current count as argument. In other
words, async->last_pkts_n is always 0. Removing it greatly simplifies
the function.
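
The invariant can be double-checked with a small standalone model of the
old accounting. The snippet below is only an illustration (the loop,
fake_check_completed() and the fixed count of 32 are made up for this
sketch); its sole assumption is the one stated above, that the callback
never reports more completions than the max_packets it is asked for:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for the DPDK RTE_MIN() macro. */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Hypothetical check_completed_copies callback: it may report any
     * number of completions, but never more than max_packets. */
    static uint16_t fake_check_completed(uint16_t max_packets)
    {
            return max_packets ? max_packets - 1 : 0;
    }

    int main(void)
    {
            uint16_t last_pkts_n = 0;  /* starts at 0 at registration */
            const uint16_t count = 32; /* arbitrary burst size */

            for (int i = 0; i < 1000; i++) {
                    /* Same bookkeeping as the old function body. */
                    uint16_t n_cpl = fake_check_completed(count - last_pkts_n);
                    uint16_t n_pkts_cpl = n_cpl + last_pkts_n;
                    uint16_t n_pkts_put = MIN(n_pkts_cpl, count);

                    last_pkts_n = n_pkts_cpl - n_pkts_put;
                    assert(last_pkts_n == 0); /* never accumulates */
            }
            return 0;
    }
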
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/vhost.h      |  1 -
 lib/vhost/virtio_net.c | 76 ++++++++++++++++++-------------------------------
 2 files changed, 28 insertions(+), 49 deletions(-)

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 9afb75fa6e..ae5bc257cc 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -141,7 +141,6 @@ struct vhost_async {
 	struct async_inflight_info *pkts_info;
 	uint16_t pkts_idx;
 	uint16_t pkts_inflight_n;
-	uint16_t last_pkts_n;
 	union {
 		struct vring_used_elem *descs_split;
 		struct vring_used_elem_packed *buffers_packed;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index dfe864b3e9..3206b3f816 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1619,7 +1619,11 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 				vq->shadow_used_idx);
 
 		async->desc_idx_split += vq->shadow_used_idx;
+
 		async->pkts_idx += pkt_idx;
+		if (async->pkts_idx >= vq->size)
+			async->pkts_idx -= vq->size;
+
 		async->pkts_inflight_n += pkt_idx;
 		vq->shadow_used_idx = 0;
 	}
@@ -1921,68 +1925,44 @@ static __rte_always_inline uint16_t
 vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
-	struct vhost_virtqueue *vq;
-	struct vhost_async *async;
-	struct async_inflight_info *pkts_info;
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async = vq->async;
+	struct async_inflight_info *pkts_info = async->pkts_info;
 	int32_t n_cpl;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
-	uint16_t start_idx, pkts_idx, vq_size;
-	uint16_t from, i;
+	uint16_t n_descs = 0, n_buffers = 0;
+	uint16_t start_idx, from, i;
 
-	vq = dev->virtqueue[queue_id];
-	async = vq->async;
-	pkts_idx = async->pkts_idx % vq->size;
-	pkts_info = async->pkts_info;
-	vq_size = vq->size;
-	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
-		vq_size, async->pkts_inflight_n);
-
-	if (count > async->last_pkts_n) {
-		n_cpl = async->ops.check_completed_copies(dev->vid,
-			queue_id, 0, count - async->last_pkts_n);
-		if (likely(n_cpl >= 0)) {
-			n_pkts_cpl = n_cpl;
-		} else {
-			VHOST_LOG_DATA(ERR,
-				"(%d) %s: failed to check completed copies for queue id %d.\n",
+	start_idx = virtio_dev_rx_async_get_info_idx(async->pkts_idx,
+			vq->size, async->pkts_inflight_n);
+
+	n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
+	if (unlikely(n_cpl < 0)) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
 				dev->vid, __func__, queue_id);
-			n_pkts_cpl = 0;
-		}
+		return 0;
 	}
 
-	n_pkts_cpl += async->last_pkts_n;
-	n_pkts_put = RTE_MIN(n_pkts_cpl, count);
-	if (unlikely(n_pkts_put == 0)) {
-		async->last_pkts_n = n_pkts_cpl;
+	if (n_cpl == 0)
 		return 0;
-	}
 
-	if (vq_is_packed(dev)) {
-		for (i = 0; i < n_pkts_put; i++) {
-			from = (start_idx + i) % vq_size;
-			n_buffers += pkts_info[from].nr_buffers;
-			pkts[i] = pkts_info[from].mbuf;
-		}
-	} else {
-		for (i = 0; i < n_pkts_put; i++) {
-			from = (start_idx + i) & (vq_size - 1);
-			n_descs += pkts_info[from].descs;
-			pkts[i] = pkts_info[from].mbuf;
-		}
+	for (i = 0; i < n_cpl; i++) {
+		from = (start_idx + i) % vq->size;
+		/* Only used with packed ring */
+		n_buffers += pkts_info[from].nr_buffers;
+		/* Only used with split ring */
+		n_descs += pkts_info[from].descs;
+		pkts[i] = pkts_info[from].mbuf;
 	}
-	async->last_pkts_n = n_pkts_cpl - n_pkts_put;
-	async->pkts_inflight_n -= n_pkts_put;
+
+	async->pkts_inflight_n -= n_cpl;
 
 	if (likely(vq->enabled && vq->access_ok)) {
 		if (vq_is_packed(dev)) {
 			write_back_completed_descs_packed(vq, n_buffers);
-
 			vhost_vring_call_packed(dev, vq);
 		} else {
 			write_back_completed_descs_split(vq, n_descs);
-
-			__atomic_add_fetch(&vq->used->idx, n_descs,
-					__ATOMIC_RELEASE);
+			__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
 			vhost_vring_call_split(dev, vq);
 		}
 	} else {
@@ -1995,7 +1975,7 @@ vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
 		}
 	}
 
-	return n_pkts_put;
+	return n_cpl;
 }
 
 uint16_t
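
For context, the value returned here is what the application ultimately
sees from rte_vhost_poll_enqueue_completed(). A rough application-side
polling sketch (vid, queue_id and the CPL_BURST size are placeholders;
the submission path and error handling are omitted) could look like:

    #include <rte_mbuf.h>
    #include <rte_vhost_async.h>

    #define CPL_BURST 32 /* arbitrary example burst size */

    static void poll_enqueue_completions(int vid, uint16_t queue_id)
    {
            struct rte_mbuf *cpl[CPL_BURST];
            uint16_t n, i;

            /* Packets whose asynchronous copies have completed; with this
             * patch the count comes straight from check_completed_copies(). */
            n = rte_vhost_poll_enqueue_completed(vid, queue_id, cpl, CPL_BURST);

            /* The application still owns the completed mbufs and frees them. */
            for (i = 0; i < n; i++)
                    rte_pktmbuf_free(cpl[i]);
    }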