[dpdk-dev,1/5] xen: allow determining DOM0 at runtime

Message ID: 1446768574-32310-2-git-send-email-stephen@networkplumber.org (mailing list archive)
State: Accepted, archived

Commit Message

Stephen Hemminger Nov. 6, 2015, 12:09 a.m. UTC
  Add the memory infrastructure needed to determine Xen DOM0 support at
  runtime rather than at build time.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Jijiang Liu <Jijiang.liu@intel.com>

---
 lib/librte_eal/common/include/rte_memory.h   | 30 ++++++++++++++++-
 lib/librte_eal/linuxapp/eal/eal_memory.c     |  7 ++++
 lib/librte_eal/linuxapp/eal/eal_xen_memory.c |  2 +-
 lib/librte_ether/rte_ethdev.c                | 24 ++++++++++++++
 lib/librte_ether/rte_ethdev.h                | 24 ++++++++++++++
 lib/librte_mempool/rte_mempool.c             | 48 ++++++++++++++++++++--------
 lib/librte_mempool/rte_mempool.h             |  3 +-
 7 files changed, 120 insertions(+), 18 deletions(-)
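
For orientation (not part of the patch itself), here is a minimal sketch of how a PMD's queue setup could call the new rte_eth_dma_zone_reserve() helper introduced below instead of open-coding a Xen Dom0 check; the function name, ring size and error handling are hypothetical:

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>

/* Hypothetical PMD Rx queue setup; names and sizes are illustrative only. */
static int
my_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, int socket_id)
{
	const size_t ring_size = 4096 * sizeof(uint64_t); /* illustrative */
	const struct rte_memzone *mz;

	/*
	 * Reserves (or looks up) a zone named "<driver>_rx_ring_<port>_<queue>".
	 * On Xen Dom0 the reservation is bounded to 2M pages; otherwise it is
	 * a plain aligned reservation, so the driver needs no #ifdef.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, ring_size,
				      RTE_CACHE_LINE_SIZE, socket_id);
	if (mz == NULL)
		return -ENOMEM;

	/* mz->phys_addr would then be programmed into the NIC ring base. */
	return 0;
}
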
  

Patch

diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 1bed415..067be10 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -52,6 +52,8 @@ 
 extern "C" {
 #endif
 
+#include <rte_common.h>
+
 enum rte_page_sizes {
 	RTE_PGSIZE_4K    = 1ULL << 12,
 	RTE_PGSIZE_64K   = 1ULL << 16,
@@ -180,6 +182,13 @@  unsigned rte_memory_get_nchannel(void);
 unsigned rte_memory_get_nrank(void);
 
 #ifdef RTE_LIBRTE_XEN_DOM0
+
+/** Internal use only - should DOM0 memory mapping be used */
+extern int is_xen_dom0_supported(void);
+
+/** Internal use only - physical to machine address translation for Xen */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t, const phys_addr_t);
+
 /**
  * Return the physical address of elt, which is an element of the pool mp.
  *
@@ -191,7 +200,14 @@  unsigned rte_memory_get_nrank(void);
  * @return
  *   The physical address or error.
  */
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+	if (is_xen_dom0_supported())
+		return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+	else
+		return phy_addr;
+}
 
 /**
  * Memory init for supporting application running on Xen domain0.
@@ -214,7 +230,19 @@  int rte_xen_dom0_memory_init(void);
  *       negative: error
  */
 int rte_xen_dom0_memory_attach(void);
+#else
+static inline int is_xen_dom0_supported(void)
+{
+	return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+	return phy_addr;
+}
 #endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 657d19f..0de75cd 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -97,6 +97,13 @@ 
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+int is_xen_dom0_supported(void)
+{
+	return internal_config.xen_dom0_support;
+}
+#endif
+
 /**
  * @file
  * Huge page mapping under linux
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index d228a9d..7fd9e83 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -156,7 +156,7 @@  get_xen_memory_size(void)
  * Based on physical address to calculate MFN in Xen Dom0.
  */
 phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
 {
 	int mfn_id;
 	uint64_t mfn, mfn_offset;
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index e0e1dca..756e894 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -2699,6 +2699,30 @@  rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
 
 	return 0;
 }
+ 
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
+			 uint16_t queue_id, size_t size, unsigned align,
+			 int socket_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+		 dev->driver->pci_drv.name, ring_name,
+		 dev->data->port_id, queue_id);
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		return mz;
+
+	if (is_xen_dom0_supported())
+		return rte_memzone_reserve_bounded(z_name, size, socket_id,
+						   0, align, RTE_PGSIZE_2M);
+	else
+		return rte_memzone_reserve_aligned(z_name, size, socket_id,
+						   0, align);
+}
 
 int
 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 48a540d..785f482 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3768,6 +3768,30 @@  extern int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
 extern void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev);
 
 
+/**
+ * Create a memzone for HW descriptor rings.
+ * malloc() cannot be used because the physical address of the memory is needed.
+ * If the memzone already exists, this function returns a pointer to the
+ * existing one.
+ *
+ * @param eth_dev
+ *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ *   The name of the memory zone
+ * @param queue_id
+ *   The index of the queue to add to name
+ * @param size
+ *   The size of the memory area
+ * @param align
+ *   Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+			 uint16_t queue_id, size_t size,
+			 unsigned align, int socket_id);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index e57cbbd..f53076a 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -375,6 +375,26 @@  rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
 	return usz;
 }
 
+#ifndef RTE_LIBRTE_XEN_DOM0
+/* stub if DOM0 support not configured */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name __rte_unused,
+			unsigned n __rte_unused,
+			unsigned elt_size __rte_unused,
+			unsigned cache_size __rte_unused,
+			unsigned private_data_size __rte_unused,
+			rte_mempool_ctor_t *mp_init __rte_unused,
+			void *mp_init_arg __rte_unused,
+			rte_mempool_obj_ctor_t *obj_init __rte_unused,
+			void *obj_init_arg __rte_unused,
+			int socket_id __rte_unused,
+			unsigned flags __rte_unused)
+{
+	rte_errno = EINVAL;
+	return NULL;
+}
+#endif
+
 /* create the mempool */
 struct rte_mempool *
 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -383,20 +403,20 @@  rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
 		   int socket_id, unsigned flags)
 {
-#ifdef RTE_LIBRTE_XEN_DOM0
-	return rte_dom0_mempool_create(name, n, elt_size,
-		cache_size, private_data_size,
-		mp_init, mp_init_arg,
-		obj_init, obj_init_arg,
-		socket_id, flags);
-#else
-	return rte_mempool_xmem_create(name, n, elt_size,
-		cache_size, private_data_size,
-		mp_init, mp_init_arg,
-		obj_init, obj_init_arg,
-		socket_id, flags,
-		NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX);
-#endif
+	if (is_xen_dom0_supported())
+		return rte_dom0_mempool_create(name, n, elt_size,
+					       cache_size, private_data_size,
+					       mp_init, mp_init_arg,
+					       obj_init, obj_init_arg,
+					       socket_id, flags);
+	else
+		return rte_mempool_xmem_create(name, n, elt_size,
+					       cache_size, private_data_size,
+					       mp_init, mp_init_arg,
+					       obj_init, obj_init_arg,
+					       socket_id, flags,
+					       NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+					       MEMPOOL_PG_SHIFT_MAX);
 }
 
 /*
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8abeca9..6e2390a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -640,7 +640,6 @@  rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		int socket_id, unsigned flags, void *vaddr,
 		const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
 
-#ifdef RTE_LIBRTE_XEN_DOM0
 /**
  * Create a new mempool named *name* in memory on Xen Dom0.
  *
@@ -728,7 +727,7 @@  rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
 		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
 		rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
 		int socket_id, unsigned flags);
-#endif
+
 
 /**
  * Dump the status of the mempool to the console.
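
Taken together with the EAL change, the mempool path is also selected at run time, so one binary works both inside and outside Dom0. A rough sketch (hypothetical application code, assuming the linuxapp EAL's --xen-dom0 option is what sets internal_config.xen_dom0_support; error handling trimmed):

#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical application init showing the runtime selection. */
int
main(int argc, char **argv)
{
	struct rte_mempool *mp;

	/* Same binary, launched with or without the EAL Xen Dom0 option. */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/*
	 * rte_mempool_create() now branches on is_xen_dom0_supported() at
	 * runtime: Dom0 uses rte_dom0_mempool_create(), everything else the
	 * usual rte_mempool_xmem_create() path.
	 */
	mp = rte_mempool_create("pkt_pool", 8192, 2048, 256, 0,
				NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
	if (mp == NULL)
		return -rte_errno;

	return 0;
}
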