[17/40] net/virtio: move features definition to generic header
Checks
Commit Message
This patch moves all the Virtio definitions to the generic
header. It also renames some helpers to no longer reference
PCI.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/meson.build | 3 +-
drivers/net/virtio/virtio.c | 22 ++++
drivers/net/virtio/virtio.h | 94 +++++++++++++++
drivers/net/virtio/virtio_ethdev.c | 110 +++++++++---------
drivers/net/virtio/virtio_pci.c | 21 +---
drivers/net/virtio/virtio_pci.h | 90 --------------
drivers/net/virtio/virtio_ring.h | 2 +-
drivers/net/virtio/virtio_rxtx.c | 38 +++---
drivers/net/virtio/virtio_rxtx_packed_avx.c | 6 +-
.../net/virtio/virtio_user/vhost_kernel_tap.c | 2 +-
drivers/net/virtio/virtio_user_ethdev.c | 6 +-
drivers/net/virtio/virtqueue.c | 4 +-
drivers/net/virtio/virtqueue.h | 8 +-
13 files changed, 209 insertions(+), 197 deletions(-)
create mode 100644 drivers/net/virtio/virtio.c
Comments
Hi Maxime,
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Monday, December 21, 2020 5:14 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; olivier.matz@6wind.com;
> amorenoz@redhat.com; david.marchand@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 17/40] net/virtio: move features definition to generic header
>
> This patch moves all the Virtio definitions to the generic
> header. It also renames some helpers to no longer reference
> PCI.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> drivers/net/virtio/meson.build | 3 +-
> drivers/net/virtio/virtio.c | 22 ++++
> drivers/net/virtio/virtio.h | 94 +++++++++++++++
> drivers/net/virtio/virtio_ethdev.c | 110 +++++++++---------
> drivers/net/virtio/virtio_pci.c | 21 +---
> drivers/net/virtio/virtio_pci.h | 90 --------------
> drivers/net/virtio/virtio_ring.h | 2 +-
> drivers/net/virtio/virtio_rxtx.c | 38 +++---
> drivers/net/virtio/virtio_rxtx_packed_avx.c | 6 +-
> .../net/virtio/virtio_user/vhost_kernel_tap.c | 2 +-
> drivers/net/virtio/virtio_user_ethdev.c | 6 +-
> drivers/net/virtio/virtqueue.c | 4 +-
> drivers/net/virtio/virtqueue.h | 8 +-
> 13 files changed, 209 insertions(+), 197 deletions(-)
> create mode 100644 drivers/net/virtio/virtio.c
>
> diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
> index 0b62418f33..7de41cd04d 100644
> --- a/drivers/net/virtio/meson.build
> +++ b/drivers/net/virtio/meson.build
> @@ -1,7 +1,8 @@
> # SPDX-License-Identifier: BSD-3-Clause
> # Copyright(c) 2018 Intel Corporation
>
> -sources += files('virtio_ethdev.c',
> +sources += files('virtio.c',
> + 'virtio_ethdev.c',
Better align the file names 😊
> 'virtio_pci_ethdev.c',
> 'virtio_pci.c',
> 'virtio_rxtx.c',
> diff --git a/drivers/net/virtio/virtio.c b/drivers/net/virtio/virtio.c
> new file mode 100644
> index 0000000000..d8d6bf7add
> --- /dev/null
> +++ b/drivers/net/virtio/virtio.c
> @@ -0,0 +1,22 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2010-2014 Intel Corporation
> + * Copyright(c) 2020 Red Hat, Inc.
> + */
> +
> +#include "virtio.h"
> +
> +uint64_t
> +virtio_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
> +{
> + uint64_t features;
[snip]
> @@ -1664,9 +1664,9 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
> req_features)
> eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
>
> /* Setting up rx_header size for the device */
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> - vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> + virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
> + virtio_with_feature(hw, VIRTIO_F_RING_PACKED))
There is mixed usage of virtio_with_packed_queue and virtio_with_feature(hw,
VIRTIO_F_RING_PACKED). I think we should use only one. Since virtio_with_packed_queue
is introduced, should we keep only that one? What do you think?
Thanks
Chenbo
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> else
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> @@ -1681,7 +1681,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
> req_features)
> hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
>
> if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
> config = &local_config;
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config, speed),
> @@ -1697,14 +1697,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev,
> uint64_t req_features)
> hw->duplex = ETH_LINK_FULL_DUPLEX;
> PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
> hw->speed, hw->duplex);
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
> config = &local_config;
>
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config, mac),
> &config->mac, sizeof(config->mac));
>
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config, status),
> &config->status, sizeof(config->status));
> @@ -1714,7 +1714,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
> req_features)
> config->status = 0;
> }
>
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config,
> max_virtqueue_pairs),
> &config->max_virtqueue_pairs,
> @@ -1727,7 +1727,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
> req_features)
>
> hw->max_queue_pairs = config->max_virtqueue_pairs;
>
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config, mtu),
> &config->mtu,
> @@ -1838,7 +1838,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> goto err_virtio_init;
>
> if (vectorized) {
> - if (!vtpci_packed_queue(hw)) {
> + if (!virtio_with_packed_queue(hw)) {
> hw->use_vec_rx = 1;
> } else {
> #if !defined(CC_AVX512_SUPPORT)
> @@ -1965,17 +1965,17 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs,
> uint32_t *speed, int *vect
> static bool
> rx_offload_enabled(struct virtio_hw *hw)
> {
> - return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
> + return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
> }
>
> static bool
> tx_offload_enabled(struct virtio_hw *hw)
> {
> - return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
> + return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
> }
>
> /*
> @@ -2048,29 +2048,29 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>
> if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
> DEV_RX_OFFLOAD_TCP_CKSUM)) &&
> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
> PMD_DRV_LOG(ERR,
> "rx checksum not available on this host");
> return -ENOTSUP;
> }
>
> if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
> - (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
> + (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
> PMD_DRV_LOG(ERR,
> "Large Receive Offload not available on this host");
> return -ENOTSUP;
> }
>
> /* start control queue */
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
> virtio_dev_cq_start(dev);
>
> if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
> hw->vlan_strip = 1;
>
> - if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
> - && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
> + if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
> PMD_DRV_LOG(ERR,
> "vlan filtering not available on this host");
> return -ENOTSUP;
> @@ -2087,12 +2087,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> return -EBUSY;
> }
>
> - if (vtpci_packed_queue(hw)) {
> + if (virtio_with_packed_queue(hw)) {
> #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
> if ((hw->use_vec_rx || hw->use_vec_tx) &&
> (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
> - !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
> - !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> + !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
> + !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
> rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
> PMD_DRV_LOG(INFO,
> "disabled packed ring vectorized path for requirements
> not met");
> @@ -2105,7 +2105,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> #endif
>
> if (hw->use_vec_rx) {
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> PMD_DRV_LOG(INFO,
> "disabled packed ring vectorized rx for
> mrg_rxbuf enabled");
> hw->use_vec_rx = 0;
> @@ -2118,7 +2118,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> }
> }
> } else {
> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> hw->use_inorder_tx = 1;
> hw->use_inorder_rx = 1;
> hw->use_vec_rx = 0;
> @@ -2132,7 +2132,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> hw->use_vec_rx = 0;
> }
> #endif
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> PMD_DRV_LOG(INFO,
> "disabled split ring vectorized rx for mrg_rxbuf
> enabled");
> hw->use_vec_rx = 0;
> @@ -2350,7 +2350,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev,
> __rte_unused int wait_to_complet
> if (!hw->started) {
> link.link_status = ETH_LINK_DOWN;
> link.link_speed = ETH_SPEED_NUM_NONE;
> - } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> + } else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
> PMD_INIT_LOG(DEBUG, "Get link status from hw");
> vtpci_read_dev_config(hw,
> offsetof(struct virtio_net_config, status),
> @@ -2381,7 +2381,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int
> mask)
>
> if (mask & ETH_VLAN_FILTER_MASK) {
> if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
> - !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>
> PMD_DRV_LOG(NOTICE,
> "vlan filtering not available on this host");
> diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
> index df69fcdd45..9c07ebad00 100644
> --- a/drivers/net/virtio/virtio_pci.c
> +++ b/drivers/net/virtio/virtio_pci.c
> @@ -356,7 +356,7 @@ modern_set_features(struct virtio_hw *hw, uint64_t
> features)
> static int
> modern_features_ok(struct virtio_hw *hw)
> {
> - if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
> + if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
> PMD_INIT_LOG(ERR, "Version 1+ required with modern devices\n");
> return -1;
> }
> @@ -479,12 +479,12 @@ modern_notify_queue(struct virtio_hw *hw, struct
> virtqueue *vq)
> {
> uint32_t notify_data;
>
> - if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
> + if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
> rte_write16(vq->vq_queue_index, vq->notify_addr);
> return;
> }
>
> - if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
> + if (virtio_with_feature(hw, VIRTIO_F_RING_PACKED)) {
> /*
> * Bit[0:15]: vq queue index
> * Bit[16:30]: avail index
> @@ -548,21 +548,6 @@ vtpci_write_dev_config(struct virtio_hw *hw, size_t
> offset,
> VIRTIO_OPS(hw)->write_dev_cfg(hw, offset, src, length);
> }
>
> -uint64_t
> -vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
> -{
> - uint64_t features;
> -
> - /*
> - * Limit negotiated features to what the driver, virtqueue, and
> - * host all support.
> - */
> - features = host_features & hw->guest_features;
> - VIRTIO_OPS(hw)->set_features(hw, features);
> -
> - return features;
> -}
> -
> void
> vtpci_reset(struct virtio_hw *hw)
> {
> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
> index 8b07c4a369..b02e5c15f5 100644
> --- a/drivers/net/virtio/virtio_pci.h
> +++ b/drivers/net/virtio/virtio_pci.h
> @@ -79,83 +79,6 @@ struct virtnet_ctl;
> */
> #define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
>
> -/* The feature bitmap for virtio net */
> -#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
> -#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial
> csum */
> -#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
> -#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
> -#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
> -#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
> -#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in.
> */
> -#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
> -#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
> -#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
> -#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in.
> */
> -#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
> -#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers.
> */
> -#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
> -#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
> -#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
> -#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
> -#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
> -#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
> - * network */
> -#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
> - * Steering */
> -#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
> -
> -/* Do we get callbacks when the ring is completely used, even if we've
> - * suppressed them? */
> -#define VIRTIO_F_NOTIFY_ON_EMPTY 24
> -
> -/* Can the device handle any descriptor layout? */
> -#define VIRTIO_F_ANY_LAYOUT 27
> -
> -/* We support indirect buffer descriptors */
> -#define VIRTIO_RING_F_INDIRECT_DESC 28
> -
> -#define VIRTIO_F_VERSION_1 32
> -#define VIRTIO_F_IOMMU_PLATFORM 33
> -#define VIRTIO_F_RING_PACKED 34
> -
> -/*
> - * Some VirtIO feature bits (currently bits 28 through 31) are
> - * reserved for the transport being used (eg. virtio_ring), the
> - * rest are per-device feature bits.
> - */
> -#define VIRTIO_TRANSPORT_F_START 28
> -#define VIRTIO_TRANSPORT_F_END 34
> -
> -/*
> - * Inorder feature indicates that all buffers are used by the device
> - * in the same order in which they have been made available.
> - */
> -#define VIRTIO_F_IN_ORDER 35
> -
> -/*
> - * This feature indicates that memory accesses by the driver and the device
> - * are ordered in a way described by the platform.
> - */
> -#define VIRTIO_F_ORDER_PLATFORM 36
> -
> -/*
> - * This feature indicates that the driver passes extra data (besides
> - * identifying the virtqueue) in its device notifications.
> - */
> -#define VIRTIO_F_NOTIFICATION_DATA 38
> -
> -/* Device set linkspeed and duplex */
> -#define VIRTIO_NET_F_SPEED_DUPLEX 63
> -
> -/* The Guest publishes the used index for which it expects an interrupt
> - * at the end of the avail ring. Host should ignore the avail->flags field.
> */
> -/* The Host publishes the avail index for which it expects a kick
> - * at the end of the used ring. Guest should ignore the used->flags field. */
> -#define VIRTIO_RING_F_EVENT_IDX 29
> -
> -#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
> -#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
> -
> /*
> * Maximum number of virtqueues per device.
> */
> @@ -271,17 +194,6 @@ enum virtio_msix_status {
> VIRTIO_MSIX_ENABLED = 2
> };
>
> -static inline int
> -vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
> -{
> - return (hw->guest_features & (1ULL << bit)) != 0;
> -}
> -
> -static inline int
> -vtpci_packed_queue(struct virtio_hw *hw)
> -{
> - return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
> -}
>
> /*
> * Function declaration from virtio_pci.c
> @@ -294,8 +206,6 @@ void vtpci_reinit_complete(struct virtio_hw *);
> uint8_t vtpci_get_status(struct virtio_hw *);
> void vtpci_set_status(struct virtio_hw *, uint8_t);
>
> -uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
> -
> void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
>
> void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
> diff --git a/drivers/net/virtio/virtio_ring.h
> b/drivers/net/virtio/virtio_ring.h
> index 0f6574f684..17a56b0a73 100644
> --- a/drivers/net/virtio/virtio_ring.h
> +++ b/drivers/net/virtio/virtio_ring.h
> @@ -133,7 +133,7 @@ vring_size(struct virtio_hw *hw, unsigned int num,
> unsigned long align)
> {
> size_t size;
>
> - if (vtpci_packed_queue(hw)) {
> + if (virtio_with_packed_queue(hw)) {
> size = num * sizeof(struct vring_packed_desc);
> size += sizeof(struct vring_packed_desc_event);
> size = RTE_ALIGN_CEIL(size, align);
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index 93fe856cbd..10989118b0 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -685,14 +685,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
> struct rte_mbuf *m;
> uint16_t desc_idx;
> int error, nbufs, i;
> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>
> PMD_INIT_FUNC_TRACE();
>
> /* Allocate blank mbufs for the each rx descriptor */
> nbufs = 0;
>
> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
> for (desc_idx = 0; desc_idx < vq->vq_nentries;
> desc_idx++) {
> vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
> @@ -710,12 +710,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
> &rxvq->fake_mbuf;
> }
>
> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
> while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
> virtio_rxq_rearm_vec(rxvq);
> nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
> }
> - } else if (!vtpci_packed_queue(vq->hw) && in_order) {
> + } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
> if ((!virtqueue_full(vq))) {
> uint16_t free_cnt = vq->vq_free_cnt;
> struct rte_mbuf *pkts[free_cnt];
> @@ -741,7 +741,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
> break;
>
> /* Enqueue allocated buffers */
> - if (vtpci_packed_queue(vq->hw))
> + if (virtio_with_packed_queue(vq->hw))
> error = virtqueue_enqueue_recv_refill_packed(vq,
> &m, 1);
> else
> @@ -754,7 +754,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
> nbufs++;
> }
>
> - if (!vtpci_packed_queue(vq->hw))
> + if (!virtio_with_packed_queue(vq->hw))
> vq_update_avail_idx(vq);
> }
>
> @@ -829,8 +829,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>
> PMD_INIT_FUNC_TRACE();
>
> - if (!vtpci_packed_queue(hw)) {
> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
> + if (!virtio_with_packed_queue(hw)) {
> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
> vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
> }
>
> @@ -847,7 +847,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf
> *m)
> * Requeue the discarded mbuf. This should always be
> * successful since it was just dequeued.
> */
> - if (vtpci_packed_queue(vq->hw))
> + if (virtio_with_packed_queue(vq->hw))
> error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
> else
> error = virtqueue_enqueue_recv_refill(vq, &m, 1);
> @@ -1209,7 +1209,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
> ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
> - hdr_size);
>
> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> seg_num = header->num_buffers;
> if (seg_num == 0)
> seg_num = 1;
> @@ -1735,7 +1735,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> struct virtio_hw *hw = vq->hw;
> uint16_t hdr_size = hw->vtnet_hdr_size;
> uint16_t nb_tx = 0;
> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>
> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> return nb_tx;
> @@ -1754,8 +1754,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> int can_push = 0, use_indirect = 0, slots, need;
>
> /* optimize ring usage */
> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> rte_mbuf_refcnt_read(txm) == 1 &&
> RTE_MBUF_DIRECT(txm) &&
> txm->nb_segs == 1 &&
> @@ -1763,7 +1763,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
> can_push = 1;
> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
> use_indirect = 1;
> /* How many main ring entries are needed to this Tx?
> @@ -1835,8 +1835,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
> int can_push = 0, use_indirect = 0, slots, need;
>
> /* optimize ring usage */
> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> rte_mbuf_refcnt_read(txm) == 1 &&
> RTE_MBUF_DIRECT(txm) &&
> txm->nb_segs == 1 &&
> @@ -1844,7 +1844,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
> can_push = 1;
> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
> use_indirect = 1;
>
> @@ -1937,8 +1937,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> int slots;
>
> /* optimize ring usage */
> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> rte_mbuf_refcnt_read(txm) == 1 &&
> RTE_MBUF_DIRECT(txm) &&
> txm->nb_segs == 1 &&
> diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c
> b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> index a6a49ec439..c272766a9f 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
> +++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> @@ -211,14 +211,14 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx
> *txvq,
> int16_t need;
>
> /* optimize ring usage */
> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> rte_mbuf_refcnt_read(txm) == 1 &&
> RTE_MBUF_DIRECT(txm) &&
> txm->nb_segs == 1 &&
> rte_pktmbuf_headroom(txm) >= hdr_size)
> can_push = 1;
> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
> use_indirect = 1;
> /* How many main ring entries are needed to this Tx?
> diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
> b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
> index 79b8446f8e..eade702c5c 100644
> --- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
> +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
> @@ -16,7 +16,7 @@
>
> #include "vhost_kernel_tap.h"
> #include "../virtio_logs.h"
> -#include "../virtio_pci.h"
> +#include "../virtio.h"
>
> int
> vhost_kernel_tap_set_offload(int fd, uint64_t features)
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index 14468ddf52..d05613ba3b 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -122,7 +122,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
> dev->features &= dev->device_features;
>
> /* For packed ring, resetting queues is required in reconnection. */
> - if (vtpci_packed_queue(hw) &&
> + if (virtio_with_packed_queue(hw) &&
> (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
> PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
> " when packed ring reconnecting.");
> @@ -423,7 +423,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct
> virtqueue *vq)
> {
> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>
> - if (vtpci_packed_queue(hw))
> + if (virtio_with_packed_queue(hw))
> virtio_user_setup_queue_packed(vq, dev);
> else
> virtio_user_setup_queue_split(vq, dev);
> @@ -456,7 +456,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct
> virtqueue *vq)
> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>
> if (hw->cvq && (hw->cvq->vq == vq)) {
> - if (vtpci_packed_queue(vq->hw))
> + if (virtio_with_packed_queue(vq->hw))
> virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
> else
> virtio_user_handle_cq(dev, vq->vq_queue_index);
> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
> index 2702e120ee..59a2cb6599 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -32,7 +32,7 @@ virtqueue_detach_unused(struct virtqueue *vq)
> end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
>
> for (idx = 0; idx < vq->vq_nentries; idx++) {
> - if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
> type == VTNET_RQ) {
> if (start <= end && idx >= start && idx < end)
> continue;
> @@ -137,7 +137,7 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> {
> struct virtio_hw *hw = vq->hw;
>
> - if (vtpci_packed_queue(hw))
> + if (virtio_with_packed_queue(hw))
> virtqueue_rxvq_flush_packed(vq);
> else
> virtqueue_rxvq_flush_split(vq);
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 9d2089766b..6c1df6f8e5 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -12,7 +12,7 @@
> #include <rte_mempool.h>
> #include <rte_net.h>
>
> -#include "virtio_pci.h"
> +#include "virtio.h"
> #include "virtio_ring.h"
> #include "virtio_logs.h"
> #include "virtio_rxtx.h"
> @@ -386,7 +386,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq)
> static inline void
> virtqueue_disable_intr(struct virtqueue *vq)
> {
> - if (vtpci_packed_queue(vq->hw))
> + if (virtio_with_packed_queue(vq->hw))
> virtqueue_disable_intr_packed(vq);
> else
> virtqueue_disable_intr_split(vq);
> @@ -420,7 +420,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq)
> static inline void
> virtqueue_enable_intr(struct virtqueue *vq)
> {
> - if (vtpci_packed_queue(vq->hw))
> + if (virtio_with_packed_queue(vq->hw))
> virtqueue_enable_intr_packed(vq);
> else
> virtqueue_enable_intr_split(vq);
> @@ -573,7 +573,7 @@ virtqueue_notify(struct virtqueue *vq)
> used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
> __ATOMIC_RELAXED); \
> nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
> - if (vtpci_packed_queue((vq)->hw)) { \
> + if (virtio_with_packed_queue((vq)->hw)) { \
> PMD_INIT_LOG(DEBUG, \
> "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
> " cached_flags=0x%x; used_wrap_counter=%d", \
> --
> 2.29.2
On 12/30/20 4:14 AM, Xia, Chenbo wrote:
> Hi Maxime,
>
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Monday, December 21, 2020 5:14 AM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; olivier.matz@6wind.com;
>> amorenoz@redhat.com; david.marchand@redhat.com
>> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH 17/40] net/virtio: move features definition to generic header
>>
>> This patch moves all the Virtio definitions to the generic
>> header. It also renames some helpers to no longer reference
>> PCI.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>> drivers/net/virtio/meson.build | 3 +-
>> drivers/net/virtio/virtio.c | 22 ++++
>> drivers/net/virtio/virtio.h | 94 +++++++++++++++
>> drivers/net/virtio/virtio_ethdev.c | 110 +++++++++---------
>> drivers/net/virtio/virtio_pci.c | 21 +---
>> drivers/net/virtio/virtio_pci.h | 90 --------------
>> drivers/net/virtio/virtio_ring.h | 2 +-
>> drivers/net/virtio/virtio_rxtx.c | 38 +++---
>> drivers/net/virtio/virtio_rxtx_packed_avx.c | 6 +-
>> .../net/virtio/virtio_user/vhost_kernel_tap.c | 2 +-
>> drivers/net/virtio/virtio_user_ethdev.c | 6 +-
>> drivers/net/virtio/virtqueue.c | 4 +-
>> drivers/net/virtio/virtqueue.h | 8 +-
>> 13 files changed, 209 insertions(+), 197 deletions(-)
>> create mode 100644 drivers/net/virtio/virtio.c
>>
>> diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
>> index 0b62418f33..7de41cd04d 100644
>> --- a/drivers/net/virtio/meson.build
>> +++ b/drivers/net/virtio/meson.build
>> @@ -1,7 +1,8 @@
>> # SPDX-License-Identifier: BSD-3-Clause
>> # Copyright(c) 2018 Intel Corporation
>>
>> -sources += files('virtio_ethdev.c',
>> +sources += files('virtio.c',
>> + 'virtio_ethdev.c',
>
> Better align the file names 😊
Done.
>
>> 'virtio_pci_ethdev.c',
>> 'virtio_pci.c',
>> 'virtio_rxtx.c',
>> diff --git a/drivers/net/virtio/virtio.c b/drivers/net/virtio/virtio.c
>> new file mode 100644
>> index 0000000000..d8d6bf7add
>> --- /dev/null
>> +++ b/drivers/net/virtio/virtio.c
>> @@ -0,0 +1,22 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2010-2014 Intel Corporation
>> + * Copyright(c) 2020 Red Hat, Inc.
>> + */
>> +
>> +#include "virtio.h"
>> +
>> +uint64_t
>> +virtio_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
>> +{
>> + uint64_t features;
>
> [snip]
>
>> @@ -1664,9 +1664,9 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
>> req_features)
>> eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
>>
>> /* Setting up rx_header size for the device */
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> - vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> + virtio_with_feature(hw, VIRTIO_F_RING_PACKED))
>
> There is mixed usage of virtio_with_packed_queue and virtio_with_feature(hw,
> VIRTIO_F_RING_PACKED). I think we should use only one. Since virtio_with_packed_queue
> is introduced, should we keep only that one? What do you think?
Yes, it may be better to use only one, as it makes it easier to grep for.
I will apply your suggestion in v2.
Thanks!
Maxime
> Thanks
> Chenbo
>
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>> else
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
>> @@ -1681,7 +1681,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
>> req_features)
>> hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
>>
>> if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
>> config = &local_config;
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, speed),
>> @@ -1697,14 +1697,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev,
>> uint64_t req_features)
>> hw->duplex = ETH_LINK_FULL_DUPLEX;
>> PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
>> hw->speed, hw->duplex);
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
>> config = &local_config;
>>
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, mac),
>> &config->mac, sizeof(config->mac));
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, status),
>> &config->status, sizeof(config->status));
>> @@ -1714,7 +1714,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
>> req_features)
>> config->status = 0;
>> }
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config,
>> max_virtqueue_pairs),
>> &config->max_virtqueue_pairs,
>> @@ -1727,7 +1727,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t
>> req_features)
>>
>> hw->max_queue_pairs = config->max_virtqueue_pairs;
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, mtu),
>> &config->mtu,
>> @@ -1838,7 +1838,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>> goto err_virtio_init;
>>
>> if (vectorized) {
>> - if (!vtpci_packed_queue(hw)) {
>> + if (!virtio_with_packed_queue(hw)) {
>> hw->use_vec_rx = 1;
>> } else {
>> #if !defined(CC_AVX512_SUPPORT)
>> @@ -1965,17 +1965,17 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs,
>> uint32_t *speed, int *vect
>> static bool
>> rx_offload_enabled(struct virtio_hw *hw)
>> {
>> - return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>> + return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>> }
>>
>> static bool
>> tx_offload_enabled(struct virtio_hw *hw)
>> {
>> - return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
>> - vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
>> + return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
>> + virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
>> }
>>
>> /*
>> @@ -2048,29 +2048,29 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>>
>> if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
>> DEV_RX_OFFLOAD_TCP_CKSUM)) &&
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
>> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
>> PMD_DRV_LOG(ERR,
>> "rx checksum not available on this host");
>> return -ENOTSUP;
>> }
>>
>> if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
>> - (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
>> + (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
>> + !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
>> PMD_DRV_LOG(ERR,
>> "Large Receive Offload not available on this host");
>> return -ENOTSUP;
>> }
>>
>> /* start control queue */
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
>> virtio_dev_cq_start(dev);
>>
>> if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
>> hw->vlan_strip = 1;
>>
>> - if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
>> - && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> + if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
>> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> PMD_DRV_LOG(ERR,
>> "vlan filtering not available on this host");
>> return -ENOTSUP;
>> @@ -2087,12 +2087,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> return -EBUSY;
>> }
>>
>> - if (vtpci_packed_queue(hw)) {
>> + if (virtio_with_packed_queue(hw)) {
>> #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
>> if ((hw->use_vec_rx || hw->use_vec_tx) &&
>> (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
>> - !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
>> - !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> + !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
>> + !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
>> PMD_DRV_LOG(INFO,
>> "disabled packed ring vectorized path for requirements
>> not met");
>> @@ -2105,7 +2105,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> #endif
>>
>> if (hw->use_vec_rx) {
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> PMD_DRV_LOG(INFO,
>> "disabled packed ring vectorized rx for
>> mrg_rxbuf enabled");
>> hw->use_vec_rx = 0;
>> @@ -2118,7 +2118,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> }
>> }
>> } else {
>> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>> hw->use_inorder_tx = 1;
>> hw->use_inorder_rx = 1;
>> hw->use_vec_rx = 0;
>> @@ -2132,7 +2132,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>> hw->use_vec_rx = 0;
>> }
>> #endif
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> PMD_DRV_LOG(INFO,
>> "disabled split ring vectorized rx for mrg_rxbuf
>> enabled");
>> hw->use_vec_rx = 0;
>> @@ -2350,7 +2350,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev,
>> __rte_unused int wait_to_complet
>> if (!hw->started) {
>> link.link_status = ETH_LINK_DOWN;
>> link.link_speed = ETH_SPEED_NUM_NONE;
>> - } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> + } else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
>> PMD_INIT_LOG(DEBUG, "Get link status from hw");
>> vtpci_read_dev_config(hw,
>> offsetof(struct virtio_net_config, status),
>> @@ -2381,7 +2381,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int
>> mask)
>>
>> if (mask & ETH_VLAN_FILTER_MASK) {
>> if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
>> - !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>> + !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
>>
>> PMD_DRV_LOG(NOTICE,
>> "vlan filtering not available on this host");
>> diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
>> index df69fcdd45..9c07ebad00 100644
>> --- a/drivers/net/virtio/virtio_pci.c
>> +++ b/drivers/net/virtio/virtio_pci.c
>> @@ -356,7 +356,7 @@ modern_set_features(struct virtio_hw *hw, uint64_t
>> features)
>> static int
>> modern_features_ok(struct virtio_hw *hw)
>> {
>> - if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
>> + if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
>> PMD_INIT_LOG(ERR, "Version 1+ required with modern devices\n");
>> return -1;
>> }
>> @@ -479,12 +479,12 @@ modern_notify_queue(struct virtio_hw *hw, struct
>> virtqueue *vq)
>> {
>> uint32_t notify_data;
>>
>> - if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
>> + if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
>> rte_write16(vq->vq_queue_index, vq->notify_addr);
>> return;
>> }
>>
>> - if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_RING_PACKED)) {
>> /*
>> * Bit[0:15]: vq queue index
>> * Bit[16:30]: avail index
>> @@ -548,21 +548,6 @@ vtpci_write_dev_config(struct virtio_hw *hw, size_t
>> offset,
>> VIRTIO_OPS(hw)->write_dev_cfg(hw, offset, src, length);
>> }
>>
>> -uint64_t
>> -vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
>> -{
>> - uint64_t features;
>> -
>> - /*
>> - * Limit negotiated features to what the driver, virtqueue, and
>> - * host all support.
>> - */
>> - features = host_features & hw->guest_features;
>> - VIRTIO_OPS(hw)->set_features(hw, features);
>> -
>> - return features;
>> -}
>> -
>> void
>> vtpci_reset(struct virtio_hw *hw)
>> {
>> diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
>> index 8b07c4a369..b02e5c15f5 100644
>> --- a/drivers/net/virtio/virtio_pci.h
>> +++ b/drivers/net/virtio/virtio_pci.h
>> @@ -79,83 +79,6 @@ struct virtnet_ctl;
>> */
>> #define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
>>
>> -/* The feature bitmap for virtio net */
>> -#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
>> -#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial
>> csum */
>> -#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
>> -#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
>> -#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
>> -#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
>> -#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in.
>> */
>> -#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
>> -#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
>> -#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
>> -#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in.
>> */
>> -#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
>> -#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers.
>> */
>> -#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
>> -#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
>> -#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
>> -#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
>> -#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
>> -#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
>> - * network */
>> -#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
>> - * Steering */
>> -#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
>> -
>> -/* Do we get callbacks when the ring is completely used, even if we've
>> - * suppressed them? */
>> -#define VIRTIO_F_NOTIFY_ON_EMPTY 24
>> -
>> -/* Can the device handle any descriptor layout? */
>> -#define VIRTIO_F_ANY_LAYOUT 27
>> -
>> -/* We support indirect buffer descriptors */
>> -#define VIRTIO_RING_F_INDIRECT_DESC 28
>> -
>> -#define VIRTIO_F_VERSION_1 32
>> -#define VIRTIO_F_IOMMU_PLATFORM 33
>> -#define VIRTIO_F_RING_PACKED 34
>> -
>> -/*
>> - * Some VirtIO feature bits (currently bits 28 through 31) are
>> - * reserved for the transport being used (eg. virtio_ring), the
>> - * rest are per-device feature bits.
>> - */
>> -#define VIRTIO_TRANSPORT_F_START 28
>> -#define VIRTIO_TRANSPORT_F_END 34
>> -
>> -/*
>> - * Inorder feature indicates that all buffers are used by the device
>> - * in the same order in which they have been made available.
>> - */
>> -#define VIRTIO_F_IN_ORDER 35
>> -
>> -/*
>> - * This feature indicates that memory accesses by the driver and the device
>> - * are ordered in a way described by the platform.
>> - */
>> -#define VIRTIO_F_ORDER_PLATFORM 36
>> -
>> -/*
>> - * This feature indicates that the driver passes extra data (besides
>> - * identifying the virtqueue) in its device notifications.
>> - */
>> -#define VIRTIO_F_NOTIFICATION_DATA 38
>> -
>> -/* Device set linkspeed and duplex */
>> -#define VIRTIO_NET_F_SPEED_DUPLEX 63
>> -
>> -/* The Guest publishes the used index for which it expects an interrupt
>> - * at the end of the avail ring. Host should ignore the avail->flags field.
>> */
>> -/* The Host publishes the avail index for which it expects a kick
>> - * at the end of the used ring. Guest should ignore the used->flags field. */
>> -#define VIRTIO_RING_F_EVENT_IDX 29
>> -
>> -#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
>> -#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
>> -
>> /*
>> * Maximum number of virtqueues per device.
>> */
>> @@ -271,17 +194,6 @@ enum virtio_msix_status {
>> VIRTIO_MSIX_ENABLED = 2
>> };
>>
>> -static inline int
>> -vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
>> -{
>> - return (hw->guest_features & (1ULL << bit)) != 0;
>> -}
>> -
>> -static inline int
>> -vtpci_packed_queue(struct virtio_hw *hw)
>> -{
>> - return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
>> -}
>>
>> /*
>> * Function declaration from virtio_pci.c
>> @@ -294,8 +206,6 @@ void vtpci_reinit_complete(struct virtio_hw *);
>> uint8_t vtpci_get_status(struct virtio_hw *);
>> void vtpci_set_status(struct virtio_hw *, uint8_t);
>>
>> -uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
>> -
>> void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
>>
>> void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
>> diff --git a/drivers/net/virtio/virtio_ring.h
>> b/drivers/net/virtio/virtio_ring.h
>> index 0f6574f684..17a56b0a73 100644
>> --- a/drivers/net/virtio/virtio_ring.h
>> +++ b/drivers/net/virtio/virtio_ring.h
>> @@ -133,7 +133,7 @@ vring_size(struct virtio_hw *hw, unsigned int num,
>> unsigned long align)
>> {
>> size_t size;
>>
>> - if (vtpci_packed_queue(hw)) {
>> + if (virtio_with_packed_queue(hw)) {
>> size = num * sizeof(struct vring_packed_desc);
>> size += sizeof(struct vring_packed_desc_event);
>> size = RTE_ALIGN_CEIL(size, align);
>> diff --git a/drivers/net/virtio/virtio_rxtx.c
>> b/drivers/net/virtio/virtio_rxtx.c
>> index 93fe856cbd..10989118b0 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -685,14 +685,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>> struct rte_mbuf *m;
>> uint16_t desc_idx;
>> int error, nbufs, i;
>> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
>> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>>
>> PMD_INIT_FUNC_TRACE();
>>
>> /* Allocate blank mbufs for the each rx descriptor */
>> nbufs = 0;
>>
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> for (desc_idx = 0; desc_idx < vq->vq_nentries;
>> desc_idx++) {
>> vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
>> @@ -710,12 +710,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>> &rxvq->fake_mbuf;
>> }
>>
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
>> virtio_rxq_rearm_vec(rxvq);
>> nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
>> }
>> - } else if (!vtpci_packed_queue(vq->hw) && in_order) {
>> + } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
>> if ((!virtqueue_full(vq))) {
>> uint16_t free_cnt = vq->vq_free_cnt;
>> struct rte_mbuf *pkts[free_cnt];
>> @@ -741,7 +741,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>> break;
>>
>> /* Enqueue allocated buffers */
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> error = virtqueue_enqueue_recv_refill_packed(vq,
>> &m, 1);
>> else
>> @@ -754,7 +754,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>> nbufs++;
>> }
>>
>> - if (!vtpci_packed_queue(vq->hw))
>> + if (!virtio_with_packed_queue(vq->hw))
>> vq_update_avail_idx(vq);
>> }
>>
>> @@ -829,8 +829,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>>
>> PMD_INIT_FUNC_TRACE();
>>
>> - if (!vtpci_packed_queue(hw)) {
>> - if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
>> + if (!virtio_with_packed_queue(hw)) {
>> + if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
>> vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
>> }
>>
>> @@ -847,7 +847,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf
>> *m)
>> * Requeue the discarded mbuf. This should always be
>> * successful since it was just dequeued.
>> */
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
>> else
>> error = virtqueue_enqueue_recv_refill(vq, &m, 1);
>> @@ -1209,7 +1209,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
>> ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
>> - hdr_size);
>>
>> - if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> seg_num = header->num_buffers;
>> if (seg_num == 0)
>> seg_num = 1;
>> @@ -1735,7 +1735,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
>> **tx_pkts,
>> struct virtio_hw *hw = vq->hw;
>> uint16_t hdr_size = hw->vtnet_hdr_size;
>> uint16_t nb_tx = 0;
>> - bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
>> + bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
>>
>> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>> return nb_tx;
>> @@ -1754,8 +1754,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
>> **tx_pkts,
>> int can_push = 0, use_indirect = 0, slots, need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> @@ -1763,7 +1763,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
>> **tx_pkts,
>> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
>> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>> /* How many main ring entries are needed to this Tx?
>> @@ -1835,8 +1835,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
>> **tx_pkts, uint16_t nb_pkts)
>> int can_push = 0, use_indirect = 0, slots, need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> @@ -1844,7 +1844,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
>> **tx_pkts, uint16_t nb_pkts)
>> rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
>> __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>>
>> @@ -1937,8 +1937,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>> int slots;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> b/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> index a6a49ec439..c272766a9f 100644
>> --- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> +++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
>> @@ -211,14 +211,14 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx
>> *txvq,
>> int16_t need;
>>
>> /* optimize ring usage */
>> - if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> + if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>> + virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
>> rte_mbuf_refcnt_read(txm) == 1 &&
>> RTE_MBUF_DIRECT(txm) &&
>> txm->nb_segs == 1 &&
>> rte_pktmbuf_headroom(txm) >= hdr_size)
>> can_push = 1;
>> - else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> + else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
>> txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
>> use_indirect = 1;
>> /* How many main ring entries are needed to this Tx?
>> diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> index 79b8446f8e..eade702c5c 100644
>> --- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
>> @@ -16,7 +16,7 @@
>>
>> #include "vhost_kernel_tap.h"
>> #include "../virtio_logs.h"
>> -#include "../virtio_pci.h"
>> +#include "../virtio.h"
>>
>> int
>> vhost_kernel_tap_set_offload(int fd, uint64_t features)
>> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
>> b/drivers/net/virtio/virtio_user_ethdev.c
>> index 14468ddf52..d05613ba3b 100644
>> --- a/drivers/net/virtio/virtio_user_ethdev.c
>> +++ b/drivers/net/virtio/virtio_user_ethdev.c
>> @@ -122,7 +122,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
>> dev->features &= dev->device_features;
>>
>> /* For packed ring, resetting queues is required in reconnection. */
>> - if (vtpci_packed_queue(hw) &&
>> + if (virtio_with_packed_queue(hw) &&
>> (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
>> PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
>> " when packed ring reconnecting.");
>> @@ -423,7 +423,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct
>> virtqueue *vq)
>> {
>> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>>
>> - if (vtpci_packed_queue(hw))
>> + if (virtio_with_packed_queue(hw))
>> virtio_user_setup_queue_packed(vq, dev);
>> else
>> virtio_user_setup_queue_split(vq, dev);
>> @@ -456,7 +456,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct
>> virtqueue *vq)
>> struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>>
>> if (hw->cvq && (hw->cvq->vq == vq)) {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
>> else
>> virtio_user_handle_cq(dev, vq->vq_queue_index);
>> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
>> index 2702e120ee..59a2cb6599 100644
>> --- a/drivers/net/virtio/virtqueue.c
>> +++ b/drivers/net/virtio/virtqueue.c
>> @@ -32,7 +32,7 @@ virtqueue_detach_unused(struct virtqueue *vq)
>> end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
>>
>> for (idx = 0; idx < vq->vq_nentries; idx++) {
>> - if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
>> + if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
>> type == VTNET_RQ) {
>> if (start <= end && idx >= start && idx < end)
>> continue;
>> @@ -137,7 +137,7 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
>> {
>> struct virtio_hw *hw = vq->hw;
>>
>> - if (vtpci_packed_queue(hw))
>> + if (virtio_with_packed_queue(hw))
>> virtqueue_rxvq_flush_packed(vq);
>> else
>> virtqueue_rxvq_flush_split(vq);
>> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>> index 9d2089766b..6c1df6f8e5 100644
>> --- a/drivers/net/virtio/virtqueue.h
>> +++ b/drivers/net/virtio/virtqueue.h
>> @@ -12,7 +12,7 @@
>> #include <rte_mempool.h>
>> #include <rte_net.h>
>>
>> -#include "virtio_pci.h"
>> +#include "virtio.h"
>> #include "virtio_ring.h"
>> #include "virtio_logs.h"
>> #include "virtio_rxtx.h"
>> @@ -386,7 +386,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq)
>> static inline void
>> virtqueue_disable_intr(struct virtqueue *vq)
>> {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtqueue_disable_intr_packed(vq);
>> else
>> virtqueue_disable_intr_split(vq);
>> @@ -420,7 +420,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq)
>> static inline void
>> virtqueue_enable_intr(struct virtqueue *vq)
>> {
>> - if (vtpci_packed_queue(vq->hw))
>> + if (virtio_with_packed_queue(vq->hw))
>> virtqueue_enable_intr_packed(vq);
>> else
>> virtqueue_enable_intr_split(vq);
>> @@ -573,7 +573,7 @@ virtqueue_notify(struct virtqueue *vq)
>> used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
>> __ATOMIC_RELAXED); \
>> nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
>> - if (vtpci_packed_queue((vq)->hw)) { \
>> + if (virtio_with_packed_queue((vq)->hw)) { \
>> PMD_INIT_LOG(DEBUG, \
>> "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
>> " cached_flags=0x%x; used_wrap_counter=%d", \
>> --
>> 2.29.2
>
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-sources += files('virtio_ethdev.c',
+sources += files('virtio.c',
+ 'virtio_ethdev.c',
'virtio_pci_ethdev.c',
'virtio_pci.c',
'virtio_rxtx.c',
new file mode 100644
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Red Hat, Inc.
+ */
+
+#include "virtio.h"
+
+uint64_t
+virtio_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VIRTIO_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
@@ -8,6 +8,86 @@
#include <rte_ether.h>
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
+#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
+#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the network */
+#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow Steering */
+#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+
+/*
+ * Do we get callbacks when the ring is completely used,
+ * even if we've suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_RING_PACKED 34
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (eg. virtio_ring), the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START 28
+#define VIRTIO_TRANSPORT_F_END 34
+
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
+/*
+ * This feature indicates that memory accesses by the driver and the device
+ * are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
+/*
+ * This feature indicates that the driver passes extra data (besides
+ * identifying the virtqueue) in its device notifications.
+ */
+#define VIRTIO_F_NOTIFICATION_DATA 38
+
+/* Device set linkspeed and duplex */
+#define VIRTIO_NET_F_SPEED_DUPLEX 63
+
+/*
+ * The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field
+ *
+ * The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
+
struct virtio_hw {
struct virtqueue **vqs;
uint64_t guest_features;
@@ -72,4 +152,18 @@ struct virtio_hw_internal {
extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+static inline int
+virtio_with_feature(struct virtio_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+static inline int
+virtio_with_packed_queue(struct virtio_hw *hw)
+{
+ return virtio_with_feature(hw, VIRTIO_F_RING_PACKED);
+}
+
+uint64_t virtio_negotiate_features(struct virtio_hw *hw, uint64_t host_features);
+
#endif /* _VIRTIO_H_ */
@@ -339,7 +339,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
else
result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
@@ -383,7 +383,7 @@ virtio_get_nr_vq(struct virtio_hw *hw)
{
uint16_t nr_vq = hw->max_queue_pairs * 2;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
nr_vq += 1;
return nr_vq;
@@ -405,7 +405,7 @@ virtio_init_vring(struct virtqueue *vq)
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
- if (vtpci_packed_queue(vq->hw)) {
+ if (virtio_with_packed_queue(vq->hw)) {
vring_init_packed(&vq->vq_packed.ring, ring_mem,
VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
@@ -453,7 +453,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
return -EINVAL;
}
- if (!vtpci_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
+ if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
return -EINVAL;
}
@@ -486,7 +486,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
- if (vtpci_packed_queue(hw)) {
+ if (virtio_with_packed_queue(hw)) {
vq->vq_packed.used_wrap_counter = 1;
vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
vq->vq_packed.event_flags_shadow = 0;
@@ -584,7 +584,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
/* first indirect descriptor is always the tx header */
- if (!vtpci_packed_queue(hw)) {
+ if (!virtio_with_packed_queue(hw)) {
struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
@@ -729,7 +729,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
int dlen[1];
int ret;
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
return -ENOTSUP;
}
@@ -756,7 +756,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
int dlen[1];
int ret;
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
return -ENOTSUP;
}
@@ -783,7 +783,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
int dlen[1];
int ret;
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
return -ENOTSUP;
}
@@ -810,7 +810,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
int dlen[1];
int ret;
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
return -ENOTSUP;
}
@@ -1104,7 +1104,7 @@ virtio_set_hwaddr(struct virtio_hw *hw)
static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
@@ -1122,7 +1122,7 @@ virtio_mac_table_set(struct virtio_hw *hw,
struct virtio_pmd_ctrl ctrl;
int err, len[2];
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
PMD_DRV_LOG(INFO, "host does not support mac table");
return -1;
}
@@ -1217,7 +1217,7 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
/* Use atomic update if available */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
struct virtio_pmd_ctrl ctrl;
int len = RTE_ETHER_ADDR_LEN;
@@ -1228,7 +1228,7 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
return -ENOTSUP;
virtio_set_hwaddr(hw);
@@ -1242,7 +1242,7 @@ virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct virtio_pmd_ctrl ctrl;
int len;
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
+ if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
return -ENOTSUP;
ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
@@ -1281,7 +1281,7 @@ virtio_intr_disable(struct rte_eth_dev *dev)
}
static int
-virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
+virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
uint64_t host_features;
@@ -1311,7 +1311,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
* guest feature bits.
*/
hw->guest_features = req_features;
- hw->guest_features = vtpci_negotiate_features(hw, host_features);
+ hw->guest_features = virtio_negotiate_features(hw, host_features);
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
@@ -1320,7 +1320,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
return -1;
}
- if (vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
@@ -1454,7 +1454,7 @@ virtio_interrupt_handler(void *param)
RTE_ETH_EVENT_INTR_LSC,
NULL);
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
@@ -1474,7 +1474,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
struct virtio_hw *hw = eth_dev->data->dev_private;
eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
- if (vtpci_packed_queue(hw)) {
+ if (virtio_with_packed_queue(hw)) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring %s Tx path on port %u",
hw->use_vec_tx ? "vectorized" : "standard",
@@ -1495,14 +1495,14 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
}
}
- if (vtpci_packed_queue(hw)) {
+ if (virtio_with_packed_queue(hw)) {
if (hw->use_vec_rx) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring vectorized Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst =
&virtio_recv_pkts_packed_vec;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring mergeable buffer Rx path on port %u",
eth_dev->data->port_id);
@@ -1524,7 +1524,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
"virtio: using inorder Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using mergeable buffer Rx path on port %u",
eth_dev->data->port_id);
@@ -1649,13 +1649,13 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Tell the host we've known how to drive the device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
- if (virtio_negotiate_features(hw, req_features) < 0)
+ if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
return -1;
- hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+ hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
/* If host does not support both status and MSI-X then disable LSC */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) &&
hw->use_msix != VIRTIO_MSIX_NONE)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
@@ -1664,9 +1664,9 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Setting up rx_header size for the device */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
- vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ virtio_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -1681,7 +1681,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
- if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
config = &local_config;
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, speed),
@@ -1697,14 +1697,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->duplex = ETH_LINK_FULL_DUPLEX;
PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
hw->speed, hw->duplex);
- if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&config->mac, sizeof(config->mac));
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&config->status, sizeof(config->status));
@@ -1714,7 +1714,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
config->status = 0;
}
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, max_virtqueue_pairs),
&config->max_virtqueue_pairs,
@@ -1727,7 +1727,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->max_queue_pairs = config->max_virtqueue_pairs;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, mtu),
&config->mtu,
@@ -1838,7 +1838,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
goto err_virtio_init;
if (vectorized) {
- if (!vtpci_packed_queue(hw)) {
+ if (!virtio_with_packed_queue(hw)) {
hw->use_vec_rx = 1;
} else {
#if !defined(CC_AVX512_SUPPORT)
@@ -1965,17 +1965,17 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vect
static bool
rx_offload_enabled(struct virtio_hw *hw)
{
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+ return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
}
static bool
tx_offload_enabled(struct virtio_hw *hw)
{
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+ return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
}
/*
@@ -2048,29 +2048,29 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM)) &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
"Large Receive Offload not available on this host");
return -ENOTSUP;
}
/* start control queue */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
hw->vlan_strip = 1;
- if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
@@ -2087,12 +2087,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EBUSY;
}
- if (vtpci_packed_queue(hw)) {
+ if (virtio_with_packed_queue(hw)) {
#if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
if ((hw->use_vec_rx || hw->use_vec_tx) &&
(!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
- !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
- !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized path for requirements not met");
@@ -2105,7 +2105,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
#endif
if (hw->use_vec_rx) {
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized rx for mrg_rxbuf enabled");
hw->use_vec_rx = 0;
@@ -2118,7 +2118,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
}
} else {
- if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
hw->use_inorder_rx = 1;
hw->use_vec_rx = 0;
@@ -2132,7 +2132,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
hw->use_vec_rx = 0;
}
#endif
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_DRV_LOG(INFO,
"disabled split ring vectorized rx for mrg_rxbuf enabled");
hw->use_vec_rx = 0;
@@ -2350,7 +2350,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
if (!hw->started) {
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ } else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
@@ -2381,7 +2381,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_FILTER_MASK) {
if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+ !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
"vlan filtering not available on this host");
@@ -356,7 +356,7 @@ modern_set_features(struct virtio_hw *hw, uint64_t features)
static int
modern_features_ok(struct virtio_hw *hw)
{
- if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
PMD_INIT_LOG(ERR, "Version 1+ required with modern devices\n");
return -1;
}
@@ -479,12 +479,12 @@ modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t notify_data;
- if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
+ if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
rte_write16(vq->vq_queue_index, vq->notify_addr);
return;
}
- if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
+ if (virtio_with_feature(hw, VIRTIO_F_RING_PACKED)) {
/*
* Bit[0:15]: vq queue index
* Bit[16:30]: avail index
@@ -548,21 +548,6 @@ vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
VIRTIO_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}
-uint64_t
-vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
-{
- uint64_t features;
-
- /*
- * Limit negotiated features to what the driver, virtqueue, and
- * host all support.
- */
- features = host_features & hw->guest_features;
- VIRTIO_OPS(hw)->set_features(hw, features);
-
- return features;
-}
-
void
vtpci_reset(struct virtio_hw *hw)
{
@@ -79,83 +79,6 @@ struct virtnet_ctl;
*/
#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
-/* The feature bitmap for virtio net */
-#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
-#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
-#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
-#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
-#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
-#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
-#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
-#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
-#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
-#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
-#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
-#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
-#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
-#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
-#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
-#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
-#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
- * network */
-#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
- * Steering */
-#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-
-/* Do we get callbacks when the ring is completely used, even if we've
- * suppressed them? */
-#define VIRTIO_F_NOTIFY_ON_EMPTY 24
-
-/* Can the device handle any descriptor layout? */
-#define VIRTIO_F_ANY_LAYOUT 27
-
-/* We support indirect buffer descriptors */
-#define VIRTIO_RING_F_INDIRECT_DESC 28
-
-#define VIRTIO_F_VERSION_1 32
-#define VIRTIO_F_IOMMU_PLATFORM 33
-#define VIRTIO_F_RING_PACKED 34
-
-/*
- * Some VirtIO feature bits (currently bits 28 through 31) are
- * reserved for the transport being used (eg. virtio_ring), the
- * rest are per-device feature bits.
- */
-#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 34
-
-/*
- * Inorder feature indicates that all buffers are used by the device
- * in the same order in which they have been made available.
- */
-#define VIRTIO_F_IN_ORDER 35
-
-/*
- * This feature indicates that memory accesses by the driver and the device
- * are ordered in a way described by the platform.
- */
-#define VIRTIO_F_ORDER_PLATFORM 36
-
-/*
- * This feature indicates that the driver passes extra data (besides
- * identifying the virtqueue) in its device notifications.
- */
-#define VIRTIO_F_NOTIFICATION_DATA 38
-
-/* Device set linkspeed and duplex */
-#define VIRTIO_NET_F_SPEED_DUPLEX 63
-
-/* The Guest publishes the used index for which it expects an interrupt
- * at the end of the avail ring. Host should ignore the avail->flags field. */
-/* The Host publishes the avail index for which it expects a kick
- * at the end of the used ring. Guest should ignore the used->flags field. */
-#define VIRTIO_RING_F_EVENT_IDX 29
-
-#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
-#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
-
/*
* Maximum number of virtqueues per device.
*/
@@ -271,17 +194,6 @@ enum virtio_msix_status {
VIRTIO_MSIX_ENABLED = 2
};
-static inline int
-vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
-{
- return (hw->guest_features & (1ULL << bit)) != 0;
-}
-
-static inline int
-vtpci_packed_queue(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
-}
/*
* Function declaration from virtio_pci.c
@@ -294,8 +206,6 @@ void vtpci_reinit_complete(struct virtio_hw *);
uint8_t vtpci_get_status(struct virtio_hw *);
void vtpci_set_status(struct virtio_hw *, uint8_t);
-uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
-
void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
@@ -133,7 +133,7 @@ vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
size_t size;
- if (vtpci_packed_queue(hw)) {
+ if (virtio_with_packed_queue(hw)) {
size = num * sizeof(struct vring_packed_desc);
size += sizeof(struct vring_packed_desc_event);
size = RTE_ALIGN_CEIL(size, align);
@@ -685,14 +685,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
struct rte_mbuf *m;
uint16_t desc_idx;
int error, nbufs, i;
- bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
+ bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
PMD_INIT_FUNC_TRACE();
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
- if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
+ if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -710,12 +710,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
&rxvq->fake_mbuf;
}
- if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
+ if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
- } else if (!vtpci_packed_queue(vq->hw) && in_order) {
+ } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
if ((!virtqueue_full(vq))) {
uint16_t free_cnt = vq->vq_free_cnt;
struct rte_mbuf *pkts[free_cnt];
@@ -741,7 +741,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
break;
/* Enqueue allocated buffers */
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
error = virtqueue_enqueue_recv_refill_packed(vq,
&m, 1);
else
@@ -754,7 +754,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs++;
}
- if (!vtpci_packed_queue(vq->hw))
+ if (!virtio_with_packed_queue(vq->hw))
vq_update_avail_idx(vq);
}
@@ -829,8 +829,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if (!vtpci_packed_queue(hw)) {
- if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
+ if (!virtio_with_packed_queue(hw)) {
+ if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
}
@@ -847,7 +847,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* Requeue the discarded mbuf. This should always be
* successful since it was just dequeued.
*/
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
else
error = virtqueue_enqueue_recv_refill(vq, &m, 1);
@@ -1209,7 +1209,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
- hdr_size);
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
seg_num = header->num_buffers;
if (seg_num == 0)
seg_num = 1;
@@ -1735,7 +1735,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_tx = 0;
- bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
+ bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
@@ -1754,8 +1754,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
int can_push = 0, use_indirect = 0, slots, need;
/* optimize ring usage */
- if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
@@ -1763,7 +1763,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
can_push = 1;
- else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
use_indirect = 1;
/* How many main ring entries are needed to this Tx?
@@ -1835,8 +1835,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int can_push = 0, use_indirect = 0, slots, need;
/* optimize ring usage */
- if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
@@ -1844,7 +1844,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
can_push = 1;
- else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
use_indirect = 1;
@@ -1937,8 +1937,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
int slots;
/* optimize ring usage */
- if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
@@ -211,14 +211,14 @@ virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
int16_t need;
/* optimize ring usage */
- if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
rte_pktmbuf_headroom(txm) >= hdr_size)
can_push = 1;
- else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
use_indirect = 1;
/* How many main ring entries are needed to this Tx?
@@ -16,7 +16,7 @@
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
-#include "../virtio_pci.h"
+#include "../virtio.h"
int
vhost_kernel_tap_set_offload(int fd, uint64_t features)
@@ -122,7 +122,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
dev->features &= dev->device_features;
/* For packed ring, resetting queues is required in reconnection. */
- if (vtpci_packed_queue(hw) &&
+ if (virtio_with_packed_queue(hw) &&
(old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
" when packed ring reconnecting.");
@@ -423,7 +423,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- if (vtpci_packed_queue(hw))
+ if (virtio_with_packed_queue(hw))
virtio_user_setup_queue_packed(vq, dev);
else
virtio_user_setup_queue_split(vq, dev);
@@ -456,7 +456,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (hw->cvq && (hw->cvq->vq == vq)) {
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
else
virtio_user_handle_cq(dev, vq->vq_queue_index);
@@ -32,7 +32,7 @@ virtqueue_detach_unused(struct virtqueue *vq)
end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
for (idx = 0; idx < vq->vq_nentries; idx++) {
- if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
+ if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
type == VTNET_RQ) {
if (start <= end && idx >= start && idx < end)
continue;
@@ -137,7 +137,7 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
{
struct virtio_hw *hw = vq->hw;
- if (vtpci_packed_queue(hw))
+ if (virtio_with_packed_queue(hw))
virtqueue_rxvq_flush_packed(vq);
else
virtqueue_rxvq_flush_split(vq);
@@ -12,7 +12,7 @@
#include <rte_mempool.h>
#include <rte_net.h>
-#include "virtio_pci.h"
+#include "virtio.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"
@@ -386,7 +386,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq)
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
virtqueue_disable_intr_packed(vq);
else
virtqueue_disable_intr_split(vq);
@@ -420,7 +420,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq)
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
- if (vtpci_packed_queue(vq->hw))
+ if (virtio_with_packed_queue(vq->hw))
virtqueue_enable_intr_packed(vq);
else
virtqueue_enable_intr_split(vq);
@@ -573,7 +573,7 @@ virtqueue_notify(struct virtqueue *vq)
used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
__ATOMIC_RELAXED); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
- if (vtpci_packed_queue((vq)->hw)) { \
+ if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
" cached_flags=0x%x; used_wrap_counter=%d", \