From patchwork Fri Feb 11 17:14:41 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ferruh Yigit X-Patchwork-Id: 107380 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id CBC17A0032; Fri, 11 Feb 2022 18:15:01 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9143340140; Fri, 11 Feb 2022 18:15:01 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 98BAB40042 for ; Fri, 11 Feb 2022 18:14:59 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1644599699; x=1676135699; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=kKMaEeJF4lIT5Gy3aV9FszzI7zQRVdL90lvs1YP5dxk=; b=oFdpMN/IoCt3URtM6oM1Kjt8agVmjfuzsxQYpxPnmJKkmNUxrEqbaFqT 9wr1P582cBxGhASUQrTKycritIUAkaf5dZXhoarsgkJioAcf83d6/tGn6 aWBKILx2mKAUfUOWgmoPjlYJkWH3oOrnqTxwN/iDXw6pAdyLf9UbgWdOn zg3zmw1+J48HfSqVVls0eyWcrkV84+OxseFMjsiDBjviph1dnAojj3Zpw FtAjV6plQDJ3TGWB71mfX3LVtMef6YTaXnBKlzmXz7mLeoc0fqZpGND2r 7dbYdZA73ANK86EC2URttdWdsVGrsu5dstEclfO6cEnl+Hm4GkZgjzMrE Q==; X-IronPort-AV: E=McAfee;i="6200,9189,10255"; a="247362410" X-IronPort-AV: E=Sophos;i="5.88,361,1635231600"; d="scan'208";a="247362410" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 11 Feb 2022 09:14:58 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.88,361,1635231600"; d="scan'208";a="679563782" Received: from silpixa00399752.ir.intel.com (HELO silpixa00399752.ger.corp.intel.com) ([10.237.222.27]) by fmsmga001.fm.intel.com with ESMTP; 11 Feb 2022 09:14:56 -0800 From: Ferruh Yigit To: Thomas Monjalon , Andrew Rybchenko , Anatoly Burakov Cc: dev@dpdk.org, Ferruh Yigit Subject: [PATCH v3 2/2] ethdev: move driver interface functions to its own file Date: Fri, 11 Feb 2022 17:14:41 +0000 Message-Id: <20220211171441.2717010-2-ferruh.yigit@intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20220211171441.2717010-1-ferruh.yigit@intel.com> References: <20220208194437.426143-1-ferruh.yigit@intel.com> <20220211171441.2717010-1-ferruh.yigit@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Relevant functions moved to ethdev_driver.c. No functional change. Signed-off-by: Ferruh Yigit Acked-by: Thomas Monjalon --- lib/ethdev/ethdev_driver.c | 758 ++++++++++++++++++++++++++++++ lib/ethdev/ethdev_private.c | 131 ++++++ lib/ethdev/ethdev_private.h | 36 ++ lib/ethdev/rte_ethdev.c | 901 ------------------------------------ 4 files changed, 925 insertions(+), 901 deletions(-) diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c index fb7323f4d327..e0ea30be5fe9 100644 --- a/lib/ethdev/ethdev_driver.c +++ b/lib/ethdev/ethdev_driver.c @@ -2,7 +2,633 @@ * Copyright(c) 2022 Intel Corporation */ +#include +#include + #include "ethdev_driver.h" +#include "ethdev_private.h" + +/** + * A set of values to describe the possible states of a switch domain. 
+ */ +enum rte_eth_switch_domain_state { + RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, + RTE_ETH_SWITCH_DOMAIN_ALLOCATED +}; + +/** + * Array of switch domains available for allocation. Array is sized to + * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than + * ethdev ports in a single process. + */ +static struct rte_eth_dev_switch { + enum rte_eth_switch_domain_state state; +} eth_dev_switch_domains[RTE_MAX_ETHPORTS]; + +static struct rte_eth_dev * +eth_dev_allocated(const char *name) +{ + uint16_t i; + + RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (rte_eth_devices[i].data != NULL && + strcmp(rte_eth_devices[i].data->name, name) == 0) + return &rte_eth_devices[i]; + } + return NULL; +} + +static uint16_t +eth_dev_find_free_port(void) +{ + uint16_t i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + /* Using shared name field to find a free port. */ + if (eth_dev_shared_data->data[i].name[0] == '\0') { + RTE_ASSERT(rte_eth_devices[i].state == + RTE_ETH_DEV_UNUSED); + return i; + } + } + return RTE_MAX_ETHPORTS; +} + +static struct rte_eth_dev * +eth_dev_get(uint16_t port_id) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; + + eth_dev->data = ð_dev_shared_data->data[port_id]; + + return eth_dev; +} + +struct rte_eth_dev * +rte_eth_dev_allocate(const char *name) +{ + uint16_t port_id; + struct rte_eth_dev *eth_dev = NULL; + size_t name_len; + + name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); + if (name_len == 0) { + RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); + return NULL; + } + + if (name_len >= RTE_ETH_NAME_MAX_LEN) { + RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); + return NULL; + } + + eth_dev_shared_data_prepare(); + + /* Synchronize port creation between primary and secondary threads. */ + rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + + if (eth_dev_allocated(name) != NULL) { + RTE_ETHDEV_LOG(ERR, + "Ethernet device with name %s already allocated\n", + name); + goto unlock; + } + + port_id = eth_dev_find_free_port(); + if (port_id == RTE_MAX_ETHPORTS) { + RTE_ETHDEV_LOG(ERR, + "Reached maximum number of Ethernet ports\n"); + goto unlock; + } + + eth_dev = eth_dev_get(port_id); + strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); + eth_dev->data->port_id = port_id; + eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS; + eth_dev->data->mtu = RTE_ETHER_MTU; + pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL); + +unlock: + rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + + return eth_dev; +} + +struct rte_eth_dev * +rte_eth_dev_allocated(const char *name) +{ + struct rte_eth_dev *ethdev; + + eth_dev_shared_data_prepare(); + + rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + + ethdev = eth_dev_allocated(name); + + rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + + return ethdev; +} + +/* + * Attach to a port already registered by the primary process, which + * makes sure that the same device would have the same port ID both + * in the primary and secondary process. + */ +struct rte_eth_dev * +rte_eth_dev_attach_secondary(const char *name) +{ + uint16_t i; + struct rte_eth_dev *eth_dev = NULL; + + eth_dev_shared_data_prepare(); + + /* Synchronize port attachment to primary port creation and release. 
*/ + rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) + break; + } + if (i == RTE_MAX_ETHPORTS) { + RTE_ETHDEV_LOG(ERR, + "Device %s is not driven by the primary process\n", + name); + } else { + eth_dev = eth_dev_get(i); + RTE_ASSERT(eth_dev->data->port_id == i); + } + + rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + return eth_dev; +} + +int +rte_eth_dev_callback_process(struct rte_eth_dev *dev, + enum rte_eth_event_type event, void *ret_param) +{ + struct rte_eth_dev_callback *cb_lst; + struct rte_eth_dev_callback dev_cb; + int rc = 0; + + rte_spinlock_lock(ð_dev_cb_lock); + TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { + if (cb_lst->cb_fn == NULL || cb_lst->event != event) + continue; + dev_cb = *cb_lst; + cb_lst->active = 1; + if (ret_param != NULL) + dev_cb.ret_param = ret_param; + + rte_spinlock_unlock(ð_dev_cb_lock); + rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, + dev_cb.cb_arg, dev_cb.ret_param); + rte_spinlock_lock(ð_dev_cb_lock); + cb_lst->active = 0; + } + rte_spinlock_unlock(ð_dev_cb_lock); + return rc; +} + +void +rte_eth_dev_probing_finish(struct rte_eth_dev *dev) +{ + if (dev == NULL) + return; + + /* + * for secondary process, at that point we expect device + * to be already 'usable', so shared data and all function pointers + * for fast-path devops have to be setup properly inside rte_eth_dev. + */ + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); + + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); + + dev->state = RTE_ETH_DEV_ATTACHED; +} + +int +rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) +{ + if (eth_dev == NULL) + return -EINVAL; + + eth_dev_shared_data_prepare(); + + if (eth_dev->state != RTE_ETH_DEV_UNUSED) + rte_eth_dev_callback_process(eth_dev, + RTE_ETH_EVENT_DESTROY, NULL); + + eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); + + rte_spinlock_lock(ð_dev_shared_data->ownership_lock); + + eth_dev->state = RTE_ETH_DEV_UNUSED; + eth_dev->device = NULL; + eth_dev->process_private = NULL; + eth_dev->intr_handle = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; + eth_dev->rx_queue_count = NULL; + eth_dev->rx_descriptor_status = NULL; + eth_dev->tx_descriptor_status = NULL; + eth_dev->dev_ops = NULL; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + rte_free(eth_dev->data->rx_queues); + rte_free(eth_dev->data->tx_queues); + rte_free(eth_dev->data->mac_addrs); + rte_free(eth_dev->data->hash_mac_addrs); + rte_free(eth_dev->data->dev_private); + pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); + memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); + } + + rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); + + return 0; +} + +int +rte_eth_dev_create(struct rte_device *device, const char *name, + size_t priv_data_size, + ethdev_bus_specific_init ethdev_bus_specific_init, + void *bus_init_params, + ethdev_init_t ethdev_init, void *init_params) +{ + struct rte_eth_dev *ethdev; + int retval; + + RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + ethdev = rte_eth_dev_allocate(name); + if (!ethdev) + return -ENODEV; + + if (priv_data_size) { + ethdev->data->dev_private = rte_zmalloc_socket( + name, priv_data_size, RTE_CACHE_LINE_SIZE, + device->numa_node); + + if (!ethdev->data->dev_private) { + RTE_ETHDEV_LOG(ERR, + 
"failed to allocate private data\n"); + retval = -ENOMEM; + goto probe_failed; + } + } + } else { + ethdev = rte_eth_dev_attach_secondary(name); + if (!ethdev) { + RTE_ETHDEV_LOG(ERR, + "secondary process attach failed, ethdev doesn't exist\n"); + return -ENODEV; + } + } + + ethdev->device = device; + + if (ethdev_bus_specific_init) { + retval = ethdev_bus_specific_init(ethdev, bus_init_params); + if (retval) { + RTE_ETHDEV_LOG(ERR, + "ethdev bus specific initialisation failed\n"); + goto probe_failed; + } + } + + retval = ethdev_init(ethdev, init_params); + if (retval) { + RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); + goto probe_failed; + } + + rte_eth_dev_probing_finish(ethdev); + + return retval; + +probe_failed: + rte_eth_dev_release_port(ethdev); + return retval; +} + +int +rte_eth_dev_destroy(struct rte_eth_dev *ethdev, + ethdev_uninit_t ethdev_uninit) +{ + int ret; + + ethdev = rte_eth_dev_allocated(ethdev->data->name); + if (!ethdev) + return -ENODEV; + + RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); + + ret = ethdev_uninit(ethdev); + if (ret) + return ret; + + return rte_eth_dev_release_port(ethdev); +} + +struct rte_eth_dev * +rte_eth_dev_get_by_name(const char *name) +{ + uint16_t pid; + + if (rte_eth_dev_get_port_by_name(name, &pid)) + return NULL; + + return &rte_eth_devices[pid]; +} + +int +rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) +{ + if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) + return 1; + return 0; +} + +int +rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) +{ + if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) + return 1; + return 0; +} + +void +rte_eth_dev_internal_reset(struct rte_eth_dev *dev) +{ + if (dev->data->dev_started) { + RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", + dev->data->port_id); + return; + } + + eth_dev_rx_queue_config(dev, 0); + eth_dev_tx_queue_config(dev, 0); + + memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); +} + +static int +eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) +{ + int state; + struct rte_kvargs_pair *pair; + char *letter; + + arglist->str = strdup(str_in); + if (arglist->str == NULL) + return -ENOMEM; + + letter = arglist->str; + state = 0; + arglist->count = 0; + pair = &arglist->pairs[0]; + while (1) { + switch (state) { + case 0: /* Initial */ + if (*letter == '=') + return -EINVAL; + else if (*letter == '\0') + return 0; + + state = 1; + pair->key = letter; + /* fall-thru */ + + case 1: /* Parsing key */ + if (*letter == '=') { + *letter = '\0'; + pair->value = letter + 1; + state = 2; + } else if (*letter == ',' || *letter == '\0') + return -EINVAL; + break; + + + case 2: /* Parsing value */ + if (*letter == '[') + state = 3; + else if (*letter == ',') { + *letter = '\0'; + arglist->count++; + pair = &arglist->pairs[arglist->count]; + state = 0; + } else if (*letter == '\0') { + letter--; + arglist->count++; + pair = &arglist->pairs[arglist->count]; + state = 0; + } + break; + + case 3: /* Parsing list */ + if (*letter == ']') + state = 2; + else if (*letter == '\0') + return -EINVAL; + break; + } + letter++; + } +} + +int +rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) +{ + struct rte_kvargs args; + struct rte_kvargs_pair *pair; + unsigned int i; + int result = 0; + + memset(eth_da, 0, sizeof(*eth_da)); + + result = eth_dev_devargs_tokenise(&args, dargs); + if (result < 0) + goto parse_cleanup; + + for (i = 0; i < 
args.count; i++) { + pair = &args.pairs[i]; + if (strcmp("representor", pair->key) == 0) { + if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { + RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", + dargs); + result = -1; + goto parse_cleanup; + } + result = rte_eth_devargs_parse_representor_ports( + pair->value, eth_da); + if (result < 0) + goto parse_cleanup; + } + } + +parse_cleanup: + if (args.str) + free(args.str); + + return result; +} + +static inline int +eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, + const char *ring_name) +{ + return snprintf(name, len, "eth_p%d_q%d_%s", + port_id, queue_id, ring_name); +} + +int +rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int rc = 0; + + rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, + queue_id, ring_name); + if (rc >= RTE_MEMZONE_NAMESIZE) { + RTE_ETHDEV_LOG(ERR, "ring name too long\n"); + return -ENAMETOOLONG; + } + + mz = rte_memzone_lookup(z_name); + if (mz) + rc = rte_memzone_free(mz); + else + rc = -ENOENT; + + return rc; +} + +const struct rte_memzone * +rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, size_t size, unsigned int align, + int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + int rc; + + rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, + queue_id, ring_name); + if (rc >= RTE_MEMZONE_NAMESIZE) { + RTE_ETHDEV_LOG(ERR, "ring name too long\n"); + rte_errno = ENAMETOOLONG; + return NULL; + } + + mz = rte_memzone_lookup(z_name); + if (mz) { + if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || + size > mz->len || + ((uintptr_t)mz->addr & (align - 1)) != 0) { + RTE_ETHDEV_LOG(ERR, + "memzone %s does not justify the requested attributes\n", + mz->name); + return NULL; + } + + return mz; + } + + return rte_memzone_reserve_aligned(z_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); +} + +int +rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue, + struct rte_hairpin_peer_info *peer_info, + uint32_t direction) +{ + struct rte_eth_dev *dev; + + if (peer_info == NULL) + return -EINVAL; + + /* No need to check the validity again. */ + dev = &rte_eth_devices[cur_port]; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind, + -ENOTSUP); + + return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue, + peer_info, direction); +} + +int +rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue, + uint32_t direction) +{ + struct rte_eth_dev *dev; + + /* No need to check the validity again. */ + dev = &rte_eth_devices[cur_port]; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind, + -ENOTSUP); + + return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue, + direction); +} + +int +rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue, + struct rte_hairpin_peer_info *cur_info, + struct rte_hairpin_peer_info *peer_info, + uint32_t direction) +{ + struct rte_eth_dev *dev; + + /* Current queue information is not mandatory. */ + if (peer_info == NULL) + return -EINVAL; + + /* No need to check the validity again. 
*/ + dev = &rte_eth_devices[peer_port]; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update, + -ENOTSUP); + + return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue, + cur_info, peer_info, direction); +} + +int +rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset) +{ + static const struct rte_mbuf_dynfield field_desc = { + .name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, + .size = sizeof(rte_eth_ip_reassembly_dynfield_t), + .align = __alignof__(rte_eth_ip_reassembly_dynfield_t), + }; + static const struct rte_mbuf_dynflag ip_reassembly_dynflag = { + .name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, + }; + int offset; + + offset = rte_mbuf_dynfield_register(&field_desc); + if (offset < 0) + return -1; + if (field_offset != NULL) + *field_offset = offset; + + offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag); + if (offset < 0) + return -1; + if (flag_offset != NULL) + *flag_offset = offset; + + return 0; +} uint16_t rte_eth_pkt_burst_dummy(void *queue __rte_unused, @@ -11,3 +637,135 @@ rte_eth_pkt_burst_dummy(void *queue __rte_unused, { return 0; } + +int +rte_eth_representor_id_get(uint16_t port_id, + enum rte_eth_representor_type type, + int controller, int pf, int representor_port, + uint16_t *repr_id) +{ + int ret, n, count; + uint32_t i; + struct rte_eth_representor_info *info = NULL; + size_t size; + + if (type == RTE_ETH_REPRESENTOR_NONE) + return 0; + if (repr_id == NULL) + return -EINVAL; + + /* Get PMD representor range info. */ + ret = rte_eth_representor_info_get(port_id, NULL); + if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && + controller == -1 && pf == -1) { + /* Direct mapping for legacy VF representor. */ + *repr_id = representor_port; + return 0; + } else if (ret < 0) { + return ret; + } + n = ret; + size = sizeof(*info) + n * sizeof(info->ranges[0]); + info = calloc(1, size); + if (info == NULL) + return -ENOMEM; + info->nb_ranges_alloc = n; + ret = rte_eth_representor_info_get(port_id, info); + if (ret < 0) + goto out; + + /* Default controller and pf to caller. */ + if (controller == -1) + controller = info->controller; + if (pf == -1) + pf = info->pf; + + /* Locate representor ID. 
*/ + ret = -ENOENT; + for (i = 0; i < info->nb_ranges; ++i) { + if (info->ranges[i].type != type) + continue; + if (info->ranges[i].controller != controller) + continue; + if (info->ranges[i].id_end < info->ranges[i].id_base) { + RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", + port_id, info->ranges[i].id_base, + info->ranges[i].id_end, i); + continue; + + } + count = info->ranges[i].id_end - info->ranges[i].id_base + 1; + switch (info->ranges[i].type) { + case RTE_ETH_REPRESENTOR_PF: + if (pf < info->ranges[i].pf || + pf >= info->ranges[i].pf + count) + continue; + *repr_id = info->ranges[i].id_base + + (pf - info->ranges[i].pf); + ret = 0; + goto out; + case RTE_ETH_REPRESENTOR_VF: + if (info->ranges[i].pf != pf) + continue; + if (representor_port < info->ranges[i].vf || + representor_port >= info->ranges[i].vf + count) + continue; + *repr_id = info->ranges[i].id_base + + (representor_port - info->ranges[i].vf); + ret = 0; + goto out; + case RTE_ETH_REPRESENTOR_SF: + if (info->ranges[i].pf != pf) + continue; + if (representor_port < info->ranges[i].sf || + representor_port >= info->ranges[i].sf + count) + continue; + *repr_id = info->ranges[i].id_base + + (representor_port - info->ranges[i].sf); + ret = 0; + goto out; + default: + break; + } + } +out: + free(info); + return ret; +} + +int +rte_eth_switch_domain_alloc(uint16_t *domain_id) +{ + uint16_t i; + + *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + if (eth_dev_switch_domains[i].state == + RTE_ETH_SWITCH_DOMAIN_UNUSED) { + eth_dev_switch_domains[i].state = + RTE_ETH_SWITCH_DOMAIN_ALLOCATED; + *domain_id = i; + return 0; + } + } + + return -ENOSPC; +} + +int +rte_eth_switch_domain_free(uint16_t domain_id) +{ + if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || + domain_id >= RTE_MAX_ETHPORTS) + return -EINVAL; + + if (eth_dev_switch_domains[domain_id].state != + RTE_ETH_SWITCH_DOMAIN_ALLOCATED) + return -EINVAL; + + eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; + + return 0; +} + diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c index 8fca20c7d45b..84dc0b320ed0 100644 --- a/lib/ethdev/ethdev_private.c +++ b/lib/ethdev/ethdev_private.c @@ -3,10 +3,22 @@ */ #include + #include "rte_ethdev.h" #include "ethdev_driver.h" #include "ethdev_private.h" +static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; + +/* Shared memory between primary and secondary processes. */ +struct eth_dev_shared *eth_dev_shared_data; + +/* spinlock for shared data allocation */ +static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + +/* spinlock for eth device callbacks */ +rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; + uint16_t eth_dev_to_id(const struct rte_eth_dev *dev) { @@ -302,3 +314,122 @@ rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id, return nb_pkts; } + +void +eth_dev_shared_data_prepare(void) +{ + const unsigned int flags = 0; + const struct rte_memzone *mz; + + rte_spinlock_lock(ð_dev_shared_data_lock); + + if (eth_dev_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate port data and ownership shared memory. 
*/ + mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, + sizeof(*eth_dev_shared_data), + rte_socket_id(), flags); + } else + mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); + if (mz == NULL) + rte_panic("Cannot allocate ethdev shared data\n"); + + eth_dev_shared_data = mz->addr; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev_shared_data->next_owner_id = + RTE_ETH_DEV_NO_OWNER + 1; + rte_spinlock_init(ð_dev_shared_data->ownership_lock); + memset(eth_dev_shared_data->data, 0, + sizeof(eth_dev_shared_data->data)); + } + } + + rte_spinlock_unlock(ð_dev_shared_data_lock); +} + +void +eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) +{ + void **rxq = dev->data->rx_queues; + + if (rxq[qid] == NULL) + return; + + if (dev->dev_ops->rx_queue_release != NULL) + (*dev->dev_ops->rx_queue_release)(dev, qid); + rxq[qid] = NULL; +} + +void +eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) +{ + void **txq = dev->data->tx_queues; + + if (txq[qid] == NULL) + return; + + if (dev->dev_ops->tx_queue_release != NULL) + (*dev->dev_ops->tx_queue_release)(dev, qid); + txq[qid] = NULL; +} + +int +eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t old_nb_queues = dev->data->nb_rx_queues; + unsigned int i; + + if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ + dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", + sizeof(dev->data->rx_queues[0]) * + RTE_MAX_QUEUES_PER_PORT, + RTE_CACHE_LINE_SIZE); + if (dev->data->rx_queues == NULL) { + dev->data->nb_rx_queues = 0; + return -(ENOMEM); + } + } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ + for (i = nb_queues; i < old_nb_queues; i++) + eth_dev_rxq_release(dev, i); + + } else if (dev->data->rx_queues != NULL && nb_queues == 0) { + for (i = nb_queues; i < old_nb_queues; i++) + eth_dev_rxq_release(dev, i); + + rte_free(dev->data->rx_queues); + dev->data->rx_queues = NULL; + } + dev->data->nb_rx_queues = nb_queues; + return 0; +} + +int +eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t old_nb_queues = dev->data->nb_tx_queues; + unsigned int i; + + if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ + dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", + sizeof(dev->data->tx_queues[0]) * + RTE_MAX_QUEUES_PER_PORT, + RTE_CACHE_LINE_SIZE); + if (dev->data->tx_queues == NULL) { + dev->data->nb_tx_queues = 0; + return -(ENOMEM); + } + } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ + for (i = nb_queues; i < old_nb_queues; i++) + eth_dev_txq_release(dev, i); + + } else if (dev->data->tx_queues != NULL && nb_queues == 0) { + for (i = nb_queues; i < old_nb_queues; i++) + eth_dev_txq_release(dev, i); + + rte_free(dev->data->tx_queues); + dev->data->tx_queues = NULL; + } + dev->data->nb_tx_queues = nb_queues; + return 0; +} + diff --git a/lib/ethdev/ethdev_private.h b/lib/ethdev/ethdev_private.h index cc91025e8d9b..cc9879907ce5 100644 --- a/lib/ethdev/ethdev_private.h +++ b/lib/ethdev/ethdev_private.h @@ -5,10 +5,38 @@ #ifndef _ETH_PRIVATE_H_ #define _ETH_PRIVATE_H_ +#include + +#include #include #include "rte_ethdev.h" +struct eth_dev_shared { + uint64_t next_owner_id; + rte_spinlock_t ownership_lock; + struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; +}; + +extern struct eth_dev_shared *eth_dev_shared_data; + +/** + * The user application callback description. 
+ * + * It contains callback address to be registered by user application, + * the pointer to the parameters for callback, and the event type. + */ +struct rte_eth_dev_callback { + TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ + rte_eth_dev_cb_fn cb_fn; /**< Callback address */ + void *cb_arg; /**< Parameter for callback */ + void *ret_param; /**< Return parameter */ + enum rte_eth_event_type event; /**< Interrupt event type */ + uint32_t active; /**< Callback is executing */ +}; + +extern rte_spinlock_t eth_dev_cb_lock; + /* * Convert rte_eth_dev pointer to port ID. * NULL will be translated to RTE_MAX_ETHPORTS. @@ -33,4 +61,12 @@ void eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo); void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo, const struct rte_eth_dev *dev); + +void eth_dev_shared_data_prepare(void); + +void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid); +void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid); +int eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues); +int eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues); + #endif /* _ETH_PRIVATE_H_ */ diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 2a479bea2128..70c850a2f18a 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -41,37 +40,23 @@ #include "ethdev_profile.h" #include "ethdev_private.h" -static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; /* public fast-path API */ struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS]; -/* spinlock for eth device callbacks */ -static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; - /* spinlock for add/remove Rx callbacks */ static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; /* spinlock for add/remove Tx callbacks */ static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; -/* spinlock for shared data allocation */ -static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER; - /* store statistics names and its offset in stats structure */ struct rte_eth_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; unsigned offset; }; -/* Shared memory between primary and secondary processes. */ -static struct { - uint64_t next_owner_id; - rte_spinlock_t ownership_lock; - struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; -} *eth_dev_shared_data; - static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = { {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, @@ -175,21 +160,6 @@ static const struct { {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"}, }; -/** - * The user application callback description. - * - * It contains callback address to be registered by user application, - * the pointer to the parameters for callback, and the event type. 
- */ -struct rte_eth_dev_callback { - TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ - rte_eth_dev_cb_fn cb_fn; /**< Callback address */ - void *cb_arg; /**< Parameter for callback */ - void *ret_param; /**< Return parameter */ - enum rte_eth_event_type event; /**< Interrupt event type */ - uint32_t active; /**< Callback is executing */ -}; - enum { STAT_QMAP_TX = 0, STAT_QMAP_RX @@ -399,227 +369,12 @@ rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) rte_eth_devices[ref_port_id].device); } -static void -eth_dev_shared_data_prepare(void) -{ - const unsigned flags = 0; - const struct rte_memzone *mz; - - rte_spinlock_lock(ð_dev_shared_data_lock); - - if (eth_dev_shared_data == NULL) { - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - /* Allocate port data and ownership shared memory. */ - mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, - sizeof(*eth_dev_shared_data), - rte_socket_id(), flags); - } else - mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); - if (mz == NULL) - rte_panic("Cannot allocate ethdev shared data\n"); - - eth_dev_shared_data = mz->addr; - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - eth_dev_shared_data->next_owner_id = - RTE_ETH_DEV_NO_OWNER + 1; - rte_spinlock_init(ð_dev_shared_data->ownership_lock); - memset(eth_dev_shared_data->data, 0, - sizeof(eth_dev_shared_data->data)); - } - } - - rte_spinlock_unlock(ð_dev_shared_data_lock); -} - static bool eth_dev_is_allocated(const struct rte_eth_dev *ethdev) { return ethdev->data->name[0] != '\0'; } -static struct rte_eth_dev * -eth_dev_allocated(const char *name) -{ - uint16_t i; - - RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX); - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (rte_eth_devices[i].data != NULL && - strcmp(rte_eth_devices[i].data->name, name) == 0) - return &rte_eth_devices[i]; - } - return NULL; -} - -struct rte_eth_dev * -rte_eth_dev_allocated(const char *name) -{ - struct rte_eth_dev *ethdev; - - eth_dev_shared_data_prepare(); - - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - ethdev = eth_dev_allocated(name); - - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); - - return ethdev; -} - -static uint16_t -eth_dev_find_free_port(void) -{ - uint16_t i; - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - /* Using shared name field to find a free port. */ - if (eth_dev_shared_data->data[i].name[0] == '\0') { - RTE_ASSERT(rte_eth_devices[i].state == - RTE_ETH_DEV_UNUSED); - return i; - } - } - return RTE_MAX_ETHPORTS; -} - -static struct rte_eth_dev * -eth_dev_get(uint16_t port_id) -{ - struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; - - eth_dev->data = ð_dev_shared_data->data[port_id]; - - return eth_dev; -} - -struct rte_eth_dev * -rte_eth_dev_allocate(const char *name) -{ - uint16_t port_id; - struct rte_eth_dev *eth_dev = NULL; - size_t name_len; - - name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); - if (name_len == 0) { - RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); - return NULL; - } - - if (name_len >= RTE_ETH_NAME_MAX_LEN) { - RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); - return NULL; - } - - eth_dev_shared_data_prepare(); - - /* Synchronize port creation between primary and secondary threads. 
*/ - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - if (eth_dev_allocated(name) != NULL) { - RTE_ETHDEV_LOG(ERR, - "Ethernet device with name %s already allocated\n", - name); - goto unlock; - } - - port_id = eth_dev_find_free_port(); - if (port_id == RTE_MAX_ETHPORTS) { - RTE_ETHDEV_LOG(ERR, - "Reached maximum number of Ethernet ports\n"); - goto unlock; - } - - eth_dev = eth_dev_get(port_id); - strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); - eth_dev->data->port_id = port_id; - eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS; - eth_dev->data->mtu = RTE_ETHER_MTU; - pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL); - -unlock: - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); - - return eth_dev; -} - -/* - * Attach to a port already registered by the primary process, which - * makes sure that the same device would have the same port ID both - * in the primary and secondary process. - */ -struct rte_eth_dev * -rte_eth_dev_attach_secondary(const char *name) -{ - uint16_t i; - struct rte_eth_dev *eth_dev = NULL; - - eth_dev_shared_data_prepare(); - - /* Synchronize port attachment to primary port creation and release. */ - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (strcmp(eth_dev_shared_data->data[i].name, name) == 0) - break; - } - if (i == RTE_MAX_ETHPORTS) { - RTE_ETHDEV_LOG(ERR, - "Device %s is not driven by the primary process\n", - name); - } else { - eth_dev = eth_dev_get(i); - RTE_ASSERT(eth_dev->data->port_id == i); - } - - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); - return eth_dev; -} - -int -rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) -{ - if (eth_dev == NULL) - return -EINVAL; - - eth_dev_shared_data_prepare(); - - if (eth_dev->state != RTE_ETH_DEV_UNUSED) - rte_eth_dev_callback_process(eth_dev, - RTE_ETH_EVENT_DESTROY, NULL); - - eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id); - - rte_spinlock_lock(ð_dev_shared_data->ownership_lock); - - eth_dev->state = RTE_ETH_DEV_UNUSED; - eth_dev->device = NULL; - eth_dev->process_private = NULL; - eth_dev->intr_handle = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - eth_dev->tx_pkt_prepare = NULL; - eth_dev->rx_queue_count = NULL; - eth_dev->rx_descriptor_status = NULL; - eth_dev->tx_descriptor_status = NULL; - eth_dev->dev_ops = NULL; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - rte_free(eth_dev->data->rx_queues); - rte_free(eth_dev->data->tx_queues); - rte_free(eth_dev->data->mac_addrs); - rte_free(eth_dev->data->hash_mac_addrs); - rte_free(eth_dev->data->dev_private); - pthread_mutex_destroy(ð_dev->data->flow_ops_mutex); - memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data)); - } - - rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); - - return 0; -} - int rte_eth_dev_is_valid_port(uint16_t port_id) { @@ -894,17 +649,6 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) return -ENODEV; } -struct rte_eth_dev * -rte_eth_dev_get_by_name(const char *name) -{ - uint16_t pid; - - if (rte_eth_dev_get_port_by_name(name, &pid)) - return NULL; - - return &rte_eth_devices[pid]; -} - static int eth_err(uint16_t port_id, int ret) { @@ -915,62 +659,6 @@ eth_err(uint16_t port_id, int ret) return ret; } -static void -eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid) -{ - void **rxq = dev->data->rx_queues; - - if (rxq[qid] == NULL) - return; - - if (dev->dev_ops->rx_queue_release != NULL) - (*dev->dev_ops->rx_queue_release)(dev, qid); - 
rxq[qid] = NULL; -} - -static void -eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid) -{ - void **txq = dev->data->tx_queues; - - if (txq[qid] == NULL) - return; - - if (dev->dev_ops->tx_queue_release != NULL) - (*dev->dev_ops->tx_queue_release)(dev, qid); - txq[qid] = NULL; -} - -static int -eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) -{ - uint16_t old_nb_queues = dev->data->nb_rx_queues; - unsigned i; - - if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ - dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", - sizeof(dev->data->rx_queues[0]) * - RTE_MAX_QUEUES_PER_PORT, - RTE_CACHE_LINE_SIZE); - if (dev->data->rx_queues == NULL) { - dev->data->nb_rx_queues = 0; - return -(ENOMEM); - } - } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ - for (i = nb_queues; i < old_nb_queues; i++) - eth_dev_rxq_release(dev, i); - - } else if (dev->data->rx_queues != NULL && nb_queues == 0) { - for (i = nb_queues; i < old_nb_queues; i++) - eth_dev_rxq_release(dev, i); - - rte_free(dev->data->rx_queues); - dev->data->rx_queues = NULL; - } - dev->data->nb_rx_queues = nb_queues; - return 0; -} - static int eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) { @@ -1161,36 +849,6 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); } -static int -eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) -{ - uint16_t old_nb_queues = dev->data->nb_tx_queues; - unsigned i; - - if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ - dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", - sizeof(dev->data->tx_queues[0]) * - RTE_MAX_QUEUES_PER_PORT, - RTE_CACHE_LINE_SIZE); - if (dev->data->tx_queues == NULL) { - dev->data->nb_tx_queues = 0; - return -(ENOMEM); - } - } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ - for (i = nb_queues; i < old_nb_queues; i++) - eth_dev_txq_release(dev, i); - - } else if (dev->data->tx_queues != NULL && nb_queues == 0) { - for (i = nb_queues; i < old_nb_queues; i++) - eth_dev_txq_release(dev, i); - - rte_free(dev->data->tx_queues); - dev->data->tx_queues = NULL; - } - dev->data->nb_tx_queues = nb_queues; - return 0; -} - uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex) { @@ -1682,21 +1340,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return ret; } -void -rte_eth_dev_internal_reset(struct rte_eth_dev *dev) -{ - if (dev->data->dev_started) { - RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n", - dev->data->port_id); - return; - } - - eth_dev_rx_queue_config(dev, 0); - eth_dev_tx_queue_config(dev, 0); - - memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); -} - static void eth_dev_mac_restore(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) @@ -4914,52 +4557,6 @@ rte_eth_dev_callback_unregister(uint16_t port_id, return ret; } -int -rte_eth_dev_callback_process(struct rte_eth_dev *dev, - enum rte_eth_event_type event, void *ret_param) -{ - struct rte_eth_dev_callback *cb_lst; - struct rte_eth_dev_callback dev_cb; - int rc = 0; - - rte_spinlock_lock(ð_dev_cb_lock); - TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { - if (cb_lst->cb_fn == NULL || cb_lst->event != event) - continue; - dev_cb = *cb_lst; - cb_lst->active = 1; - if (ret_param != NULL) - dev_cb.ret_param = ret_param; - - rte_spinlock_unlock(ð_dev_cb_lock); - 
rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, - dev_cb.cb_arg, dev_cb.ret_param); - rte_spinlock_lock(ð_dev_cb_lock); - cb_lst->active = 0; - } - rte_spinlock_unlock(ð_dev_cb_lock); - return rc; -} - -void -rte_eth_dev_probing_finish(struct rte_eth_dev *dev) -{ - if (dev == NULL) - return; - - /* - * for secondary process, at that point we expect device - * to be already 'usable', so shared data and all function pointers - * for fast-path devops have to be setup properly inside rte_eth_dev. - */ - if (rte_eal_process_type() == RTE_PROC_SECONDARY) - eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev); - - rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); - - dev->state = RTE_ETH_DEV_ATTACHED; -} - int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) { @@ -5032,156 +4629,6 @@ rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) return fd; } -static inline int -eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id, - const char *ring_name) -{ - return snprintf(name, len, "eth_p%d_q%d_%s", - port_id, queue_id, ring_name); -} - -const struct rte_memzone * -rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, - uint16_t queue_id, size_t size, unsigned align, - int socket_id) -{ - char z_name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - int rc; - - rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, - queue_id, ring_name); - if (rc >= RTE_MEMZONE_NAMESIZE) { - RTE_ETHDEV_LOG(ERR, "ring name too long\n"); - rte_errno = ENAMETOOLONG; - return NULL; - } - - mz = rte_memzone_lookup(z_name); - if (mz) { - if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) || - size > mz->len || - ((uintptr_t)mz->addr & (align - 1)) != 0) { - RTE_ETHDEV_LOG(ERR, - "memzone %s does not justify the requested attributes\n", - mz->name); - return NULL; - } - - return mz; - } - - return rte_memzone_reserve_aligned(z_name, size, socket_id, - RTE_MEMZONE_IOVA_CONTIG, align); -} - -int -rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, - uint16_t queue_id) -{ - char z_name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - int rc = 0; - - rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, - queue_id, ring_name); - if (rc >= RTE_MEMZONE_NAMESIZE) { - RTE_ETHDEV_LOG(ERR, "ring name too long\n"); - return -ENAMETOOLONG; - } - - mz = rte_memzone_lookup(z_name); - if (mz) - rc = rte_memzone_free(mz); - else - rc = -ENOENT; - - return rc; -} - -int -rte_eth_dev_create(struct rte_device *device, const char *name, - size_t priv_data_size, - ethdev_bus_specific_init ethdev_bus_specific_init, - void *bus_init_params, - ethdev_init_t ethdev_init, void *init_params) -{ - struct rte_eth_dev *ethdev; - int retval; - - RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL); - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - ethdev = rte_eth_dev_allocate(name); - if (!ethdev) - return -ENODEV; - - if (priv_data_size) { - ethdev->data->dev_private = rte_zmalloc_socket( - name, priv_data_size, RTE_CACHE_LINE_SIZE, - device->numa_node); - - if (!ethdev->data->dev_private) { - RTE_ETHDEV_LOG(ERR, - "failed to allocate private data\n"); - retval = -ENOMEM; - goto probe_failed; - } - } - } else { - ethdev = rte_eth_dev_attach_secondary(name); - if (!ethdev) { - RTE_ETHDEV_LOG(ERR, - "secondary process attach failed, ethdev doesn't exist\n"); - return -ENODEV; - } - } - - ethdev->device = device; - - if (ethdev_bus_specific_init) { - 
retval = ethdev_bus_specific_init(ethdev, bus_init_params); - if (retval) { - RTE_ETHDEV_LOG(ERR, - "ethdev bus specific initialisation failed\n"); - goto probe_failed; - } - } - - retval = ethdev_init(ethdev, init_params); - if (retval) { - RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n"); - goto probe_failed; - } - - rte_eth_dev_probing_finish(ethdev); - - return retval; - -probe_failed: - rte_eth_dev_release_port(ethdev); - return retval; -} - -int -rte_eth_dev_destroy(struct rte_eth_dev *ethdev, - ethdev_uninit_t ethdev_uninit) -{ - int ret; - - ethdev = rte_eth_dev_allocated(ethdev->data->name); - if (!ethdev) - return -ENODEV; - - RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL); - - ret = ethdev_uninit(ethdev); - if (ret) - return ret; - - return rte_eth_dev_release_port(ethdev); -} - int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data) @@ -6005,22 +5452,6 @@ rte_eth_dev_hairpin_capability_get(uint16_t port_id, return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); } -int -rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) -{ - if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) - return 1; - return 0; -} - -int -rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) -{ - if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN) - return 1; - return 0; -} - int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) { @@ -6042,255 +5473,6 @@ rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) return (*dev->dev_ops->pool_ops_supported)(dev, pool); } -/** - * A set of values to describe the possible states of a switch domain. - */ -enum rte_eth_switch_domain_state { - RTE_ETH_SWITCH_DOMAIN_UNUSED = 0, - RTE_ETH_SWITCH_DOMAIN_ALLOCATED -}; - -/** - * Array of switch domains available for allocation. Array is sized to - * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than - * ethdev ports in a single process. 
- */ -static struct rte_eth_dev_switch { - enum rte_eth_switch_domain_state state; -} eth_dev_switch_domains[RTE_MAX_ETHPORTS]; - -int -rte_eth_switch_domain_alloc(uint16_t *domain_id) -{ - uint16_t i; - - *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; - - for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (eth_dev_switch_domains[i].state == - RTE_ETH_SWITCH_DOMAIN_UNUSED) { - eth_dev_switch_domains[i].state = - RTE_ETH_SWITCH_DOMAIN_ALLOCATED; - *domain_id = i; - return 0; - } - } - - return -ENOSPC; -} - -int -rte_eth_switch_domain_free(uint16_t domain_id) -{ - if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || - domain_id >= RTE_MAX_ETHPORTS) - return -EINVAL; - - if (eth_dev_switch_domains[domain_id].state != - RTE_ETH_SWITCH_DOMAIN_ALLOCATED) - return -EINVAL; - - eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED; - - return 0; -} - -static int -eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) -{ - int state; - struct rte_kvargs_pair *pair; - char *letter; - - arglist->str = strdup(str_in); - if (arglist->str == NULL) - return -ENOMEM; - - letter = arglist->str; - state = 0; - arglist->count = 0; - pair = &arglist->pairs[0]; - while (1) { - switch (state) { - case 0: /* Initial */ - if (*letter == '=') - return -EINVAL; - else if (*letter == '\0') - return 0; - - state = 1; - pair->key = letter; - /* fall-thru */ - - case 1: /* Parsing key */ - if (*letter == '=') { - *letter = '\0'; - pair->value = letter + 1; - state = 2; - } else if (*letter == ',' || *letter == '\0') - return -EINVAL; - break; - - - case 2: /* Parsing value */ - if (*letter == '[') - state = 3; - else if (*letter == ',') { - *letter = '\0'; - arglist->count++; - pair = &arglist->pairs[arglist->count]; - state = 0; - } else if (*letter == '\0') { - letter--; - arglist->count++; - pair = &arglist->pairs[arglist->count]; - state = 0; - } - break; - - case 3: /* Parsing list */ - if (*letter == ']') - state = 2; - else if (*letter == '\0') - return -EINVAL; - break; - } - letter++; - } -} - -int -rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) -{ - struct rte_kvargs args; - struct rte_kvargs_pair *pair; - unsigned int i; - int result = 0; - - memset(eth_da, 0, sizeof(*eth_da)); - - result = eth_dev_devargs_tokenise(&args, dargs); - if (result < 0) - goto parse_cleanup; - - for (i = 0; i < args.count; i++) { - pair = &args.pairs[i]; - if (strcmp("representor", pair->key) == 0) { - if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { - RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", - dargs); - result = -1; - goto parse_cleanup; - } - result = rte_eth_devargs_parse_representor_ports( - pair->value, eth_da); - if (result < 0) - goto parse_cleanup; - } - } - -parse_cleanup: - if (args.str) - free(args.str); - - return result; -} - -int -rte_eth_representor_id_get(uint16_t port_id, - enum rte_eth_representor_type type, - int controller, int pf, int representor_port, - uint16_t *repr_id) -{ - int ret, n, count; - uint32_t i; - struct rte_eth_representor_info *info = NULL; - size_t size; - - if (type == RTE_ETH_REPRESENTOR_NONE) - return 0; - if (repr_id == NULL) - return -EINVAL; - - /* Get PMD representor range info. */ - ret = rte_eth_representor_info_get(port_id, NULL); - if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF && - controller == -1 && pf == -1) { - /* Direct mapping for legacy VF representor. 
*/ - *repr_id = representor_port; - return 0; - } else if (ret < 0) { - return ret; - } - n = ret; - size = sizeof(*info) + n * sizeof(info->ranges[0]); - info = calloc(1, size); - if (info == NULL) - return -ENOMEM; - info->nb_ranges_alloc = n; - ret = rte_eth_representor_info_get(port_id, info); - if (ret < 0) - goto out; - - /* Default controller and pf to caller. */ - if (controller == -1) - controller = info->controller; - if (pf == -1) - pf = info->pf; - - /* Locate representor ID. */ - ret = -ENOENT; - for (i = 0; i < info->nb_ranges; ++i) { - if (info->ranges[i].type != type) - continue; - if (info->ranges[i].controller != controller) - continue; - if (info->ranges[i].id_end < info->ranges[i].id_base) { - RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", - port_id, info->ranges[i].id_base, - info->ranges[i].id_end, i); - continue; - - } - count = info->ranges[i].id_end - info->ranges[i].id_base + 1; - switch (info->ranges[i].type) { - case RTE_ETH_REPRESENTOR_PF: - if (pf < info->ranges[i].pf || - pf >= info->ranges[i].pf + count) - continue; - *repr_id = info->ranges[i].id_base + - (pf - info->ranges[i].pf); - ret = 0; - goto out; - case RTE_ETH_REPRESENTOR_VF: - if (info->ranges[i].pf != pf) - continue; - if (representor_port < info->ranges[i].vf || - representor_port >= info->ranges[i].vf + count) - continue; - *repr_id = info->ranges[i].id_base + - (representor_port - info->ranges[i].vf); - ret = 0; - goto out; - case RTE_ETH_REPRESENTOR_SF: - if (info->ranges[i].pf != pf) - continue; - if (representor_port < info->ranges[i].sf || - representor_port >= info->ranges[i].sf + count) - continue; - *repr_id = info->ranges[i].id_base + - (representor_port - info->ranges[i].sf); - ret = 0; - goto out; - default: - break; - } - } -out: - free(info); - return ret; -} - static int eth_dev_handle_port_list(const char *cmd __rte_unused, const char *params __rte_unused, @@ -6533,61 +5715,6 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, return 0; } -int -rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue, - struct rte_hairpin_peer_info *cur_info, - struct rte_hairpin_peer_info *peer_info, - uint32_t direction) -{ - struct rte_eth_dev *dev; - - /* Current queue information is not mandatory. */ - if (peer_info == NULL) - return -EINVAL; - - /* No need to check the validity again. */ - dev = &rte_eth_devices[peer_port]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update, - -ENOTSUP); - - return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue, - cur_info, peer_info, direction); -} - -int -rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue, - struct rte_hairpin_peer_info *peer_info, - uint32_t direction) -{ - struct rte_eth_dev *dev; - - if (peer_info == NULL) - return -EINVAL; - - /* No need to check the validity again. */ - dev = &rte_eth_devices[cur_port]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind, - -ENOTSUP); - - return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue, - peer_info, direction); -} - -int -rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue, - uint32_t direction) -{ - struct rte_eth_dev *dev; - - /* No need to check the validity again. 
*/ - dev = &rte_eth_devices[cur_port]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind, - -ENOTSUP); - - return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue, - direction); -} - int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info) @@ -6722,34 +5849,6 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); } -int -rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset) -{ - static const struct rte_mbuf_dynfield field_desc = { - .name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, - .size = sizeof(rte_eth_ip_reassembly_dynfield_t), - .align = __alignof__(rte_eth_ip_reassembly_dynfield_t), - }; - static const struct rte_mbuf_dynflag ip_reassembly_dynflag = { - .name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, - }; - int offset; - - offset = rte_mbuf_dynfield_register(&field_desc); - if (offset < 0) - return -1; - if (field_offset != NULL) - *field_offset = offset; - - offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag); - if (offset < 0) - return -1; - if (flag_offset != NULL) - *flag_offset = offset; - - return 0; -} - int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) {
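

Below is a minimal sketch, not part of this patch, of how a PMD consumes the driver-facing API grouped in ethdev_driver.c above; the my_* names, the port name "net_my0" and the private struct layout are hypothetical:

#include <rte_ethdev.h>
#include <ethdev_driver.h>

/* Hypothetical per-device private data; rte_eth_dev_create() zmallocs
 * sizeof(struct my_priv) bytes on device->numa_node in the primary process. */
struct my_priv {
	uint16_t rsvd;
};

static int
my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params __rte_unused)
{
	/* Point the fast path at the dummy burst functions until real
	 * queues are configured. */
	ethdev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	ethdev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
	return 0;
}

static int
my_ethdev_uninit(struct rte_eth_dev *ethdev __rte_unused)
{
	return 0;
}

static int
my_probe(struct rte_device *device)
{
	/* Allocates the port in a primary process, or attaches to the
	 * port of the same name in a secondary one, then calls
	 * rte_eth_dev_probing_finish() on success. */
	return rte_eth_dev_create(device, "net_my0", sizeof(struct my_priv),
			NULL, NULL, my_ethdev_init, NULL);
}

static int
my_remove(struct rte_eth_dev *ethdev)
{
	/* Runs the uninit callback, then rte_eth_dev_release_port(). */
	return rte_eth_dev_destroy(ethdev, my_ethdev_uninit);
}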
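
The devargs path moved above can be exercised the same way: eth_dev_devargs_tokenise() splits "key=value" pairs, with state 3 of its state machine keeping a bracketed list together as a single value, and rte_eth_devargs_parse() recognises the "representor" key. A hedged sketch, with an illustrative argument string:

static int
my_parse(const char *dargs)
{
	struct rte_eth_devargs da;

	/* e.g. dargs = "representor=[0,2-4]"; the brackets keep the
	 * comma-separated list as one value for the "representor" key. */
	if (rte_eth_devargs_parse(dargs, &da) < 0)
		return -1;

	/* On success, da describes representor ports 0, 2, 3 and 4. */
	return 0;
}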