From patchwork Wed Jun  3 06:02:20 2015
X-Patchwork-Submitter: Ouyang Changchun
X-Patchwork-Id: 5091
From: Ouyang Changchun <changchun.ouyang@intel.com>
To: dev@dpdk.org
Date: Wed, 3 Jun 2015 14:02:20 +0800
Message-Id: <1433311341-12087-4-git-send-email-changchun.ouyang@intel.com>
In-Reply-To: <1433311341-12087-1-git-send-email-changchun.ouyang@intel.com>
References: <1433235064-2773-1-git-send-email-changchun.ouyang@intel.com>
 <1433311341-12087-1-git-send-email-changchun.ouyang@intel.com>
Subject: [dpdk-dev] [PATCH v5 3/4] lib_vhost: Extract function

Extract the code into two common functions: update_secure_len(), which
accumulates the buffer length across the vring descriptors, and
fill_buf_vec(), which fills struct buf_vec.
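For readers less familiar with the virtio ring layout, below is a minimal
standalone sketch of the descriptor-chain walk that update_secure_len()
encapsulates. This is an editorial illustration, not part of the patch;
the struct layouts and the function name walk_desc_chain() are simplified
stand-ins, not the real DPDK definitions.

/* Simplified stand-ins for the virtio/vhost structures. */
#include <stdint.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc {
	uint64_t addr;   /* guest-physical buffer address */
	uint32_t len;    /* buffer length */
	uint16_t flags;  /* VRING_DESC_F_NEXT marks a chained descriptor */
	uint16_t next;   /* index of the next descriptor in the chain */
};

struct buf_vec {
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * Walk one descriptor chain starting at head_idx, accumulating the total
 * buffer length and recording every descriptor into bv[]. This mirrors
 * what update_secure_len() does, minus the vq/avail-ring indirection.
 */
static void
walk_desc_chain(const struct vring_desc *desc, uint32_t head_idx,
		struct buf_vec *bv, uint32_t *total_len, uint32_t *vec_idx)
{
	uint32_t idx = head_idx;

	for (;;) {
		*total_len += desc[idx].len;
		bv[*vec_idx].buf_addr = desc[idx].addr;
		bv[*vec_idx].buf_len = desc[idx].len;
		bv[*vec_idx].desc_idx = idx;
		(*vec_idx)++;

		if (!(desc[idx].flags & VRING_DESC_F_NEXT))
			break;
		idx = desc[idx].next;	/* follow the chained descriptor */
	}
}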
Changes in v5:
  - merge fill_buf_vec into update_secure_len
  - do both tasks in one loop

Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
 lib/librte_vhost/vhost_rxtx.c | 85 ++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 49 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 5824ffc..5c31a88 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -437,6 +437,34 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
 	return entry_success;
 }
 
+static inline void __attribute__((always_inline))
+update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
+	uint32_t *secure_len, uint32_t *vec_idx)
+{
+	uint16_t wrapped_idx = id & (vq->size - 1);
+	uint32_t idx = vq->avail->ring[wrapped_idx];
+	uint8_t next_desc;
+	uint32_t len = *secure_len;
+	uint32_t vec_id = *vec_idx;
+
+	do {
+		next_desc = 0;
+		len += vq->desc[idx].len;
+		vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+		vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
+		vq->buf_vec[vec_id].desc_idx = idx;
+		vec_id++;
+
+		if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
+			idx = vq->desc[idx].next;
+			next_desc = 1;
+		}
+	} while (next_desc);
+
+	*secure_len = len;
+	*vec_idx = vec_id;
+}
+
 /*
  * This function works for mergeable RX.
  */
@@ -446,8 +474,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 {
 	struct vhost_virtqueue *vq;
 	uint32_t pkt_idx = 0, entry_success = 0;
-	uint16_t avail_idx, res_cur_idx;
-	uint16_t res_base_idx, res_end_idx;
+	uint16_t avail_idx;
+	uint16_t res_base_idx, res_cur_idx;
 	uint8_t success = 0;
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
@@ -463,17 +491,16 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		return 0;
 
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t secure_len = 0;
-		uint16_t need_cnt;
-		uint32_t vec_idx = 0;
 		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
-		uint16_t i, id;
 
 		do {
 			/*
 			 * As many data cores may want access to available
 			 * buffers, they need to be reserved.
 			 */
+			uint32_t secure_len = 0;
+			uint32_t vec_idx = 0;
+
 			res_base_idx = vq->last_used_idx_res;
 			res_cur_idx = res_base_idx;
 
@@ -487,22 +514,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 					dev->device_fh);
 				return pkt_idx;
 			} else {
-				uint16_t wrapped_idx =
-					(res_cur_idx) & (vq->size - 1);
-				uint32_t idx =
-					vq->avail->ring[wrapped_idx];
-				uint8_t next_desc;
-
-				do {
-					next_desc = 0;
-					secure_len += vq->desc[idx].len;
-					if (vq->desc[idx].flags &
-						VRING_DESC_F_NEXT) {
-						idx = vq->desc[idx].next;
-						next_desc = 1;
-					}
-				} while (next_desc);
-
+				update_secure_len(vq, res_cur_idx, &secure_len, &vec_idx);
 				res_cur_idx++;
 			}
 		} while (pkt_len > secure_len);
@@ -513,33 +525,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 					res_cur_idx);
 		} while (success == 0);
 
-		id = res_base_idx;
-		need_cnt = res_cur_idx - res_base_idx;
-
-		for (i = 0; i < need_cnt; i++, id++) {
-			uint16_t wrapped_idx = id & (vq->size - 1);
-			uint32_t idx = vq->avail->ring[wrapped_idx];
-			uint8_t next_desc;
-			do {
-				next_desc = 0;
-				vq->buf_vec[vec_idx].buf_addr =
-					vq->desc[idx].addr;
-				vq->buf_vec[vec_idx].buf_len =
-					vq->desc[idx].len;
-				vq->buf_vec[vec_idx].desc_idx = idx;
-				vec_idx++;
-
-				if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
-					idx = vq->desc[idx].next;
-					next_desc = 1;
-				}
-			} while (next_desc);
-		}
-
-		res_end_idx = res_cur_idx;
-
 		entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
-			res_end_idx, pkts[pkt_idx]);
+			res_cur_idx, pkts[pkt_idx]);
 
 		rte_compiler_barrier();
 
@@ -551,7 +538,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 			rte_pause();
 
 		*(volatile uint16_t *)&vq->used->idx += entry_success;
-		vq->last_used_idx = res_end_idx;
+		vq->last_used_idx = res_cur_idx;
 
 		/* flush used->idx update before we read avail->flags. */
 		rte_mb();
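
As additional context, here is a hypothetical, self-contained demo of the
reservation-loop pattern the patch converges on: keep consuming avail-ring
entries (each the head of a descriptor chain) until the accumulated buffer
length covers the packet. It is an editorial sketch, not the patched code;
all names, struct layouts, and ring contents are made up for illustration,
and the real code additionally checks for a full ring and reserves the
index range with a cmpset on last_used_idx_res.

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/* Sum the buffer length of one descriptor chain starting at head_idx. */
static uint32_t
chain_len(const struct vring_desc *desc, uint32_t head_idx)
{
	uint32_t idx = head_idx, len = 0;

	for (;;) {
		len += desc[idx].len;
		if (!(desc[idx].flags & VRING_DESC_F_NEXT))
			break;
		idx = desc[idx].next;
	}
	return len;
}

int main(void)
{
	/* Two single-descriptor chains of 1024 bytes each. */
	struct vring_desc desc[2] = {
		{ 0x1000, 1024, 0, 0 },
		{ 0x2000, 1024, 0, 0 },
	};
	uint16_t avail_ring[2] = { 0, 1 };	/* avail slot -> chain head */
	uint32_t pkt_len = 1500;		/* packet plus vhost header */
	uint32_t secure_len = 0;
	uint16_t res_cur_idx = 0;

	/* Keep reserving avail entries until the packet fits. */
	while (pkt_len > secure_len) {
		uint16_t head = avail_ring[res_cur_idx & 1];
		secure_len += chain_len(desc, head);
		res_cur_idx++;
	}

	printf("reserved %u avail entries, %u bytes total\n",
		(unsigned int)res_cur_idx, (unsigned int)secure_len);
	return 0;
}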