From patchwork Tue Aug 31 12:59:24 2021
X-Patchwork-Submitter: Sunil Pai G
X-Patchwork-Id: 97601
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Sunil Pai G <sunil.pai.g@intel.com>
To: dev@dpdk.org
Cc: harry.van.haaren@intel.com, Jiayu.Hu@intel.com, maxime.coquelin@redhat.com, sunil.pai.g@intel.com
Date: Tue, 31 Aug 2021 12:59:24 +0000
Message-Id: <20210831125924.3353952-1-sunil.pai.g@intel.com>
Subject: [dpdk-dev] [PATCH RFC] examples/vhost: remove the callbacks in app

Since the vhost library now performs the dmadev offload itself, the DMA
transfer and completion callbacks are no longer necessary in the
application.

Note to the reader:
-------------------
The intent of this patch is to explore possible alternative approaches to
the async implementation. Please consider this patch for discussion only,
not for merge/upstream.

This patch depends on the series:
https://patches.dpdk.org/project/dpdk/list/?series=18407
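
For readers less familiar with the dmadev API used below, here is a minimal
sketch of the per-device bring-up that open_dmadev() performs (query
capabilities, configure a single virtual channel, start the device). It uses
only the rte_dmadev_* calls that appear in this patch, i.e. the API of the
dependent series above, not the callback-based rawdev/ioat path being
removed. The helper name setup_one_dmadev() is illustrative and not part of
the patch.

#include <stdbool.h>
#include <stdint.h>
#include <rte_dmadev.h>

/* Sketch only: bring up one DMA device the way open_dmadev() does. */
static int
setup_one_dmadev(int dev_id)
{
	struct rte_dmadev_info dev_info = {0};
	struct rte_dmadev_conf dev_conf = {0};
	struct rte_dmadev_vchan_conf vchan_conf = {0};
	uint64_t capab = RTE_DMADEV_CAPA_MEM_TO_MEM | RTE_DMADEV_CAPA_OPS_COPY;

	/* The device must support mem-to-mem copies on at least one vchan. */
	if (rte_dmadev_info_get(dev_id, &dev_info) != 0)
		return -1;
	if ((dev_info.dev_capa & capab) == 0 || dev_info.max_vchans < 1)
		return -1;

	/* One virtual channel; completions are polled, so silent mode stays off. */
	dev_conf.nb_vchans = 1;
	dev_conf.enable_silent = false;
	if (rte_dmadev_configure(dev_id, &dev_conf) != 0)
		return -1;

	/* Ring sized as DMADEV_RING_SIZE (4096) in ioat.h. */
	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
	vchan_conf.nb_desc = 4096;
	if (rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) != 0)
		return -1;

	return rte_dmadev_start(dev_id);
}
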
Signed-off-by: Sunil Pai G <sunil.pai.g@intel.com>
---
 examples/vhost/ioat.c | 150 +++++-----------------------------------
 examples/vhost/ioat.h |  39 +----------
 examples/vhost/main.c |  27 ++++----
 3 files changed, 39 insertions(+), 177 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 457f8171f0..1f0c35b338 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -3,27 +3,16 @@
  */
 
 #include <sys/uio.h>
-#ifdef RTE_RAW_IOAT
-#include <rte_rawdev.h>
-#include <rte_ioat_rawdev.h>
+
 
 #include "ioat.h"
 #include "main.h"
+#include <rte_dmadev.h>
 
-struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
-
-struct packet_tracker {
-	unsigned short size_track[MAX_ENQUEUED_SIZE];
-	unsigned short next_read;
-	unsigned short next_write;
-	unsigned short last_remain;
-	unsigned short ioat_space;
-};
-
-struct packet_tracker cb_tracker[MAX_VHOST_DEVICE];
+struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE] = {0};
 
 int
-open_ioat(const char *value)
+open_dmadev(const char *value)
 {
 	struct dma_for_vhost *dma_info = dma_bind;
 	char *input = strndup(value, strlen(value) + 1);
@@ -31,8 +20,10 @@ open_ioat(const char *value)
 	char *ptrs[2];
 	char *start, *end, *substr;
 	int64_t vid, vring_id;
-	struct rte_ioat_rawdev_config config;
-	struct rte_rawdev_info info = { .dev_private = &config };
+	struct rte_dmadev_info dev_info = {0};
+	struct rte_dmadev_conf dev_conf = {0};
+	struct rte_dmadev_vchan_conf vchan_conf = {0};
+	uint64_t capab = RTE_DMADEV_CAPA_MEM_TO_MEM | RTE_DMADEV_CAPA_OPS_COPY;
 	char name[32];
 	int dev_id;
 	int ret = 0;
@@ -91,29 +82,34 @@ open_ioat(const char *value)
 		rte_pci_device_name(&(dma_info + vid)->dmas[vring_id].addr,
 				name, sizeof(name));
-		dev_id = rte_rawdev_get_dev_id(name);
+		dev_id = rte_dmadev_get_dev_id(name);
 		if (dev_id == (uint16_t)(-ENODEV) ||
 		dev_id == (uint16_t)(-EINVAL)) {
 			ret = -1;
 			goto out;
 		}
 
-		if (rte_rawdev_info_get(dev_id, &info, sizeof(config)) < 0 ||
-			strstr(info.driver_name, "ioat") == NULL) {
+		if (!rte_dmadev_info_get(dev_id, &dev_info)) {
+			if (!((dev_info.dev_capa & capab) && dev_info.max_vchans >= 1)) {
+				ret = -1;
+				goto out;
+			}
+		}
+
+		dev_conf.nb_vchans = 1;
+		dev_conf.enable_silent = false;
+		if (rte_dmadev_configure(dev_id, &dev_conf)) {
 			ret = -1;
 			goto out;
 		}
 
+		vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+		vchan_conf.nb_desc = DMADEV_RING_SIZE;
+		ret = rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+
 		(dma_info + vid)->dmas[vring_id].dev_id = dev_id;
 		(dma_info + vid)->dmas[vring_id].is_valid = true;
-		config.ring_size = IOAT_RING_SIZE;
-		config.hdls_disable = true;
-		if (rte_rawdev_configure(dev_id, &info, sizeof(config)) < 0) {
-			ret = -1;
-			goto out;
-		}
-		rte_rawdev_start(dev_id);
-		cb_tracker[dev_id].ioat_space = IOAT_RING_SIZE - 1;
+		rte_dmadev_start(dev_id);
 
 		dma_info->nr++;
 		i++;
 	}
@@ -122,101 +118,3 @@ open_ioat(const char *value)
 	return ret;
 }
 
-int32_t
-ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
-		struct rte_vhost_async_status *opaque_data, uint16_t count)
-{
-	uint32_t i_desc;
-	uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
-	struct rte_vhost_iov_iter *src = NULL;
-	struct rte_vhost_iov_iter *dst = NULL;
-	unsigned long i_seg;
-	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
-	unsigned short write = cb_tracker[dev_id].next_write;
-
-	if (!opaque_data) {
-		for (i_desc = 0; i_desc < count; i_desc++) {
-			src = descs[i_desc].src;
-			dst = descs[i_desc].dst;
-			i_seg = 0;
-			if (cb_tracker[dev_id].ioat_space < src->nr_segs)
-				break;
-			while (i_seg < src->nr_segs) {
-				rte_ioat_enqueue_copy(dev_id,
-					(uintptr_t)(src->iov[i_seg].iov_base)
-						+ src->offset,
-					(uintptr_t)(dst->iov[i_seg].iov_base)
-						+ dst->offset,
-					src->iov[i_seg].iov_len,
-					0,
-					0);
-				i_seg++;
-			}
-			write &= mask;
-			cb_tracker[dev_id].size_track[write] = src->nr_segs;
-			cb_tracker[dev_id].ioat_space -= src->nr_segs;
-			write++;
-		}
-	} else {
-		/* Opaque data is not supported */
-		return -1;
-	}
-	/* ring the doorbell */
-	rte_ioat_perform_ops(dev_id);
-	cb_tracker[dev_id].next_write = write;
-	return i_desc;
-}
-
-int32_t
-ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_status *opaque_data,
-		uint16_t max_packets)
-{
-	if (!opaque_data) {
-		uintptr_t dump[255];
-		int n_seg;
-		unsigned short read, write;
-		unsigned short nb_packet = 0;
-		unsigned short mask = MAX_ENQUEUED_SIZE - 1;
-		unsigned short i;
-
-		uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2
-				+ VIRTIO_RXQ].dev_id;
-		n_seg = rte_ioat_completed_ops(dev_id, 255, NULL, NULL, dump, dump);
-		if (n_seg < 0) {
-			RTE_LOG(ERR,
-				VHOST_DATA,
-				"fail to poll completed buf on IOAT device %u",
-				dev_id);
-			return 0;
-		}
-		if (n_seg == 0)
-			return 0;
-
-		cb_tracker[dev_id].ioat_space += n_seg;
-		n_seg += cb_tracker[dev_id].last_remain;
-
-		read = cb_tracker[dev_id].next_read;
-		write = cb_tracker[dev_id].next_write;
-		for (i = 0; i < max_packets; i++) {
-			read &= mask;
-			if (read == write)
-				break;
-			if (n_seg >= cb_tracker[dev_id].size_track[read]) {
-				n_seg -= cb_tracker[dev_id].size_track[read];
-				read++;
-				nb_packet++;
-			} else {
-				break;
-			}
-		}
-		cb_tracker[dev_id].next_read = read;
-		cb_tracker[dev_id].last_remain = n_seg;
-		return nb_packet;
-	}
-	/* Opaque data is not supported */
-	return -1;
-}
-
-#endif /* RTE_RAW_IOAT */
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 62e163c585..415e33d5a8 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -10,7 +10,7 @@
 #include <rte_vhost_async.h>
 
 #define MAX_VHOST_DEVICE 1024
-#define IOAT_RING_SIZE 4096
+#define DMADEV_RING_SIZE 4096
 #define MAX_ENQUEUED_SIZE 4096
 
 struct dma_info {
@@ -24,40 +24,5 @@ struct dma_for_vhost {
 	uint16_t nr;
 };
 
-#ifdef RTE_RAW_IOAT
-int open_ioat(const char *value);
-
-int32_t
-ioat_transfer_data_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_desc *descs,
-		struct rte_vhost_async_status *opaque_data, uint16_t count);
-
-int32_t
-ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
-		struct rte_vhost_async_status *opaque_data,
-		uint16_t max_packets);
-#else
-static int open_ioat(const char *value __rte_unused)
-{
-	return -1;
-}
-
-static int32_t
-ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
-		struct rte_vhost_async_desc *descs __rte_unused,
-		struct rte_vhost_async_status *opaque_data __rte_unused,
-		uint16_t count __rte_unused)
-{
-	return -1;
-}
-
-static int32_t
-ioat_check_completed_copies_cb(int vid __rte_unused,
-		uint16_t queue_id __rte_unused,
-		struct rte_vhost_async_status *opaque_data __rte_unused,
-		uint16_t max_packets __rte_unused)
-{
-	return -1;
-}
-#endif
+int open_dmadev(const char *value);
 #endif /* _IOAT_H_ */
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index bc3d71c898..6d59d26534 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -96,6 +96,7 @@
 static int builtin_net_driver;
 static int async_vhost_driver;
 static char *dma_type;
+extern struct dma_for_vhost dma_bind[MAX_VHOST_DEVICE];
 
 /* Specify timeout (in useconds) between retries on RX. */
 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
@@ -200,7 +201,7 @@ static inline int
 open_dma(const char *value)
 {
 	if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0)
-		return open_ioat(value);
+		return open_dmadev(value);
 
 	return -1;
 }
@@ -848,9 +849,10 @@ complete_async_pkts(struct vhost_dev *vdev)
 {
 	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
 	uint16_t complete_count;
+	int dmadev_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
-					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
+					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dmadev_id);
 	if (complete_count) {
 		free_pkts(p_cpl, complete_count);
 		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
@@ -887,6 +889,7 @@ drain_vhost(struct vhost_dev *vdev)
 	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
 	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
 	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+	int dmadev_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 
 	if (builtin_net_driver) {
 		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
@@ -897,7 +900,7 @@ drain_vhost(struct vhost_dev *vdev)
 
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
 		__atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
 
 		if (cpu_cpl_nr)
@@ -1193,7 +1196,7 @@ drain_eth_rx(struct vhost_dev *vdev)
 {
 	uint16_t rx_count, enqueue_count;
 	struct rte_mbuf *pkts[MAX_PKT_BURST];
-
+	int dmadev_id = dma_bind[0].dmas[vdev->vmdq_rx_q * 2 + VIRTIO_RXQ].dev_id;
 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
 				pkts, MAX_PKT_BURST);
 
@@ -1229,7 +1232,7 @@ drain_eth_rx(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count,
-					m_cpu_cpl, &cpu_cpl_nr);
+					m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
 		__atomic_add_fetch(&vdev->pkts_inflight,
 				enqueue_count - cpu_cpl_nr, __ATOMIC_SEQ_CST);
 
@@ -1360,7 +1363,7 @@ destroy_device(int vid)
 	struct vhost_dev *vdev = NULL;
 	int lcore;
 	uint16_t i;
-
+	int dmadev_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
 			break;
@@ -1410,7 +1413,7 @@ destroy_device(int vid)
 
 		while (vdev->pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, vdev->pkts_inflight);
+						m_cpl, vdev->pkts_inflight, dmadev_id);
 			free_pkts(m_cpl, n_pkt);
 			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 		}
@@ -1487,18 +1490,13 @@ new_device(int vid)
 
 	if (async_vhost_driver) {
 		struct rte_vhost_async_config config = {0};
-		struct rte_vhost_async_channel_ops channel_ops;
 
 		if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
-			channel_ops.transfer_data = ioat_transfer_data_cb;
-			channel_ops.check_completed_copies =
-				ioat_check_completed_copies_cb;
 
 			config.features = RTE_VHOST_ASYNC_INORDER;
 			config.async_threshold = 256;
 
-			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
-				config, &channel_ops);
+			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ, config);
 		}
 	}
 
@@ -1509,6 +1507,7 @@ static int
 vring_state_changed(int vid, uint16_t queue_id, int enable)
 {
 	struct vhost_dev *vdev = NULL;
+	int dmadev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
 
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
@@ -1527,7 +1526,7 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 
 		while (vdev->pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-						m_cpl, vdev->pkts_inflight);
+						m_cpl, vdev->pkts_inflight, dmadev_id);
 			free_pkts(m_cpl, n_pkt);
 			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 		}
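
For discussion, the sketch below condenses what the example's enqueue path
looks like once this patch is applied: the application only looks up the
dmadev that open_dmadev() bound to the virtqueue and passes its id into the
async calls, while the copy submission and completion polling that the
removed ioat_*_cb callbacks used to do now happen inside the vhost library.
The helper name enqueue_with_dmadev() is illustrative only; it reuses the
example's dma_bind/free_pkts/MAX_PKT_BURST helpers, and the trailing
dmadev_id argument on the rte_vhost_* calls follows the dependent series,
not current upstream.

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* Sketch only: enqueue a burst to a vhost device through its bound dmadev. */
static uint16_t
enqueue_with_dmadev(int vid, struct rte_mbuf **pkts, uint16_t count)
{
	struct rte_mbuf *p_cpl[MAX_PKT_BURST];
	struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
	uint32_t cpu_cpl_nr = 0;
	uint16_t n_done, n_enq;
	/* dmadev bound to this virtqueue by open_dmadev() at init time */
	int dmadev_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;

	/* Reap copies the DMA engine has finished and free those mbufs. */
	n_done = rte_vhost_poll_enqueue_completed(vid, VIRTIO_RXQ,
			p_cpl, MAX_PKT_BURST, dmadev_id);
	free_pkts(p_cpl, n_done);

	/* Hand new packets to the vhost library; it drives the dmadev itself. */
	n_enq = rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ,
			pkts, count, m_cpu_cpl, &cpu_cpl_nr, dmadev_id);
	if (cpu_cpl_nr)
		free_pkts(m_cpu_cpl, cpu_cpl_nr);

	return n_enq;
}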