@@ -2865,7 +2865,7 @@ struct rte_security_ops dpaa2_sec_security_ops = {
rte_socket_id());
if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
+ rte_panic_return("Cannot allocate memzone for private "
"device data");
}
@@ -2375,7 +2375,7 @@ struct rte_security_ops dpaa_sec_security_ops = {
rte_socket_id());
if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
+ rte_panic_return("Cannot allocate memzone for private "
"device data");
}
@@ -893,7 +893,7 @@
bond_mode_8023ad_periodic_cb, arg);
}
-void
+int
bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
uint16_t slave_id)
{
@@ -939,7 +939,7 @@
timer_cancel(&port->warning_timer);
if (port->mbuf_pool != NULL)
- return;
+ return 0;
RTE_ASSERT(port->rx_ring == NULL);
RTE_ASSERT(port->tx_ring == NULL);
@@ -968,7 +968,7 @@
/* Any memory allocation failure in initialization is critical because
* resources can't be free, so reinitialization is impossible. */
if (port->mbuf_pool == NULL) {
- rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ return rte_panic_return("Slave %u: Failed to create memory pool '%s': %s\n",
slave_id, mem_name, rte_strerror(rte_errno));
}
@@ -977,8 +977,8 @@
rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
if (port->rx_ring == NULL) {
- rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
- mem_name, rte_strerror(rte_errno));
+ return rte_panic_return("Slave %u: Failed to create rx ring '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
}
/* TX ring is at least one pkt longer to make room for marker packet. */
@@ -987,9 +987,11 @@
rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
if (port->tx_ring == NULL) {
- rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
- mem_name, rte_strerror(rte_errno));
+ return rte_panic_return("Slave %u: Fail to create tx ring '%s': %s\n",
+ slave_id, mem_name, rte_strerror(rte_errno));
}
+
+ return 0;
}
int
@@ -1143,9 +1145,12 @@
struct bond_dev_private *internals = bond_dev->data->dev_private;
uint8_t i;
- for (i = 0; i < internals->active_slave_count; i++)
- bond_mode_8023ad_activate_slave(bond_dev,
+ for (i = 0; i < internals->active_slave_count; i++) {
+ int rc = bond_mode_8023ad_activate_slave(bond_dev,
internals->active_slaves[i]);
+ if (rc != 0)
+ return rc;
+ }
return 0;
}
@@ -263,7 +263,7 @@ struct mode8023ad_private {
* @return
* 0 on success, negative value otherwise.
*/
-void
+int
bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
/**
@@ -69,14 +69,15 @@
return 0;
}
-void
+int
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
uint8_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD)
- bond_mode_8023ad_activate_slave(eth_dev, port_id);
+ if (bond_mode_8023ad_activate_slave(eth_dev, port_id) != 0)
+ return -1;
if (internals->mode == BONDING_MODE_TLB
|| internals->mode == BONDING_MODE_ALB) {
@@ -349,10 +350,17 @@
bond_ethdev_primary_set(internals,
slave_port_id);
- if (find_slave_by_id(internals->active_slaves,
- internals->active_slave_count,
- slave_port_id) == internals->active_slave_count)
- activate_slave(bonded_eth_dev, slave_port_id);
+ int rc =
+ find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count,
+ slave_port_id);
+
+ if (rc == internals->active_slave_count) {
+ int act_rc = activate_slave(bonded_eth_dev,
+ slave_port_id);
+ if (act_rc != 0)
+ return -1;
+ }
}
}
@@ -1740,7 +1740,7 @@ struct bwg_slave {
/* Any memory allocation failure in initialization is critical because
* resources can't be free, so reinitialization is impossible. */
if (port->slow_pool == NULL) {
- rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
+ return rte_panic_return("Slave %u: Failed to create memory pool '%s': %s\n",
slave_id, mem_name, rte_strerror(rte_errno));
}
}
@@ -2652,7 +2652,8 @@ struct bwg_slave {
mac_address_slaves_update(bonded_eth_dev);
}
- activate_slave(bonded_eth_dev, port_id);
+ if (activate_slave(bonded_eth_dev, port_id) != 0)
+ return -1;
/* If user has defined the primary port then default to using it */
if (internals->user_defined_primary_port &&
@@ -185,7 +185,7 @@ struct bond_dev_private {
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
-void
+int
activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
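Taken together, the bonding hunks above form an error-propagation chain: bond_mode_8023ad_activate_slave() now reports pool/ring allocation failures, bond_mode_8023ad_start() and activate_slave() forward that result, and the slave add and link-status paths return -1 to their callers instead of aborting the process. A condensed standalone sketch of that chain (stub functions only, not the actual driver code) is:

#include <stdio.h>

/* Stub standing in for the per-slave pool/ring allocation step. */
static int mode_8023ad_activate_slave(unsigned int slave_id, int alloc_ok)
{
	if (!alloc_ok) {
		fprintf(stderr, "Slave %u: failed to create rings\n", slave_id);
		return -1;	/* previously rte_panic() aborted here */
	}
	return 0;
}

/* Mirrors bond_mode_8023ad_start(): stop at the first failing slave. */
static int mode_8023ad_start(unsigned int slave_count, int alloc_ok)
{
	unsigned int i;

	for (i = 0; i < slave_count; i++) {
		int rc = mode_8023ad_activate_slave(i, alloc_ok);

		if (rc != 0)
			return rc;
	}
	return 0;
}

int main(void)
{
	/* The caller now sees -1 and can clean up; the process no longer aborts. */
	return mode_8023ad_start(2, 0) == 0 ? 0 : 1;
}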
@@ -405,7 +405,7 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
/*
* misc function prototypes
*/
-void igb_pf_host_init(struct rte_eth_dev *eth_dev);
+int igb_pf_host_init(struct rte_eth_dev *eth_dev);
void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
@@ -885,7 +885,8 @@ static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
}
/* initialize PF if max_vfs not zero */
- igb_pf_host_init(eth_dev);
+ if (igb_pf_host_init(eth_dev) != 0)
+ goto err_late;
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
@@ -63,7 +63,7 @@ int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
return 0;
}
-void igb_pf_host_init(struct rte_eth_dev *eth_dev)
+int igb_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct e1000_vf_info **vfinfo =
E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
@@ -74,7 +74,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
if (0 == (vf_num = dev_num_vf(eth_dev)))
- return;
+ return 0;
if (hw->mac.type == e1000_i350)
nb_queue = 1;
@@ -82,11 +82,11 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
/* per datasheet, it should be 2, but 1 seems correct */
nb_queue = 1;
else
- return;
+ return 0;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct e1000_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
- rte_panic("Cannot allocate memory for private VF data\n");
+ return rte_panic_return("Cannot allocate memory for private VF data\n");
RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
@@ -98,7 +98,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
igb_mb_intr_setup(eth_dev);
- return;
+ return 0;
}
void igb_pf_host_uninit(struct rte_eth_dev *dev)
@@ -1277,7 +1277,8 @@ struct rte_ixgbe_xstats_name_off {
memset(hwstrip, 0, sizeof(*hwstrip));
/* initialize PF if max_vfs not zero */
- ixgbe_pf_host_init(eth_dev);
+ if (ixgbe_pf_host_init(eth_dev) != 0)
+ return -1;
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
/* let hardware know driver is loaded */
@@ -663,7 +663,7 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
@@ -66,7 +66,7 @@ int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
return 0;
}
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
@@ -88,7 +88,7 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
- rte_panic("Cannot allocate memory for private VF data\n");
+ rte_panic_return("Cannot allocate memory for private VF data\n");
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
@@ -116,6 +116,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
+
+ return 0;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
@@ -150,7 +150,7 @@ enum rte_iova_mode
* We also don't lock the whole file, so that in future we can use read-locks
* on other parts, e.g. memzones, to detect if there are running secondary
* processes. */
-static void
+static int
rte_eal_config_create(void)
{
void *rte_mem_cfg_addr;
@@ -159,24 +159,28 @@ enum rte_iova_mode
const char *pathname = eal_runtime_config_path();
if (internal_config.no_shconf)
- return;
+ return 0;
if (mem_cfg_fd < 0){
mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
- if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ if (mem_cfg_fd < 0) {
+ return rte_panic_return("Cannot open '%s' for rte_mem_config\n",
+ pathname);
+ }
}
retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
if (retval < 0){
close(mem_cfg_fd);
- rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
+ return rte_panic_return("Cannot resize '%s' for rte_mem_config\n",
+ pathname);
}
retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
if (retval < 0){
close(mem_cfg_fd);
- rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
+ return rte_panic_return(
+ "Cannot create lock on '%s'. Is another primary "
"process running?\n", pathname);
}
@@ -184,35 +188,40 @@ enum rte_iova_mode
PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
if (rte_mem_cfg_addr == MAP_FAILED){
- rte_panic("Cannot mmap memory for rte_config\n");
+ return rte_panic_return("Cannot mmap memory for rte_config\n");
}
memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
rte_config.mem_config = rte_mem_cfg_addr;
+
+ return 0;
}
/* attach to an existing shared memory config */
-static void
+static int
rte_eal_config_attach(void)
{
void *rte_mem_cfg_addr;
const char *pathname = eal_runtime_config_path();
if (internal_config.no_shconf)
- return;
+ return 0;
if (mem_cfg_fd < 0){
mem_cfg_fd = open(pathname, O_RDWR);
if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ return rte_panic_return("Cannot open '%s' for rte_mem_config\n",
+ pathname);
}
rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
close(mem_cfg_fd);
if (rte_mem_cfg_addr == MAP_FAILED)
- rte_panic("Cannot mmap memory for rte_config\n");
+ return rte_panic_return("Cannot mmap memory for rte_config\n");
rte_config.mem_config = rte_mem_cfg_addr;
+
+ return 0;
}
/* Detect if we are a primary or a secondary process */
@@ -236,23 +245,26 @@ enum rte_proc_type_t
}
/* Sets up rte_config structure with the pointer to shared memory config.*/
-static void
+static int
rte_config_init(void)
{
rte_config.process_type = internal_config.process_type;
switch (rte_config.process_type){
case RTE_PROC_PRIMARY:
- rte_eal_config_create();
+ if (rte_eal_config_create())
+ return -1;
break;
case RTE_PROC_SECONDARY:
- rte_eal_config_attach();
+ if (rte_eal_config_attach())
+ return -1;
rte_eal_mcfg_wait_complete(rte_config.mem_config);
break;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
- rte_panic("Invalid process type\n");
+ return rte_panic_return("Invalid process type\n");
}
+ return 0;
}
/* display usage */
@@ -583,7 +595,8 @@ static void rte_eal_init_alert(const char *msg)
rte_srand(rte_rdtsc());
- rte_config_init();
+ if (rte_config_init() != 0)
+ return -1;
if (rte_mp_channel_init() < 0) {
rte_eal_init_alert("failed to init mp channel\n");
@@ -630,7 +643,8 @@ static void rte_eal_init_alert(const char *msg)
eal_check_mem_on_local_socket();
- eal_thread_init_master(rte_config.master_lcore);
+ if (eal_thread_init_master(rte_config.master_lcore) != 0)
+ return -1;
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -645,9 +659,9 @@ static void rte_eal_init_alert(const char *msg)
* and children
*/
if (pipe(lcore_config[i].pipe_master2slave) < 0)
- rte_panic("Cannot create pipe\n");
+ return rte_panic_return("Cannot create pipe\n");
if (pipe(lcore_config[i].pipe_slave2master) < 0)
- rte_panic("Cannot create pipe\n");
+ return rte_panic_return("Cannot create pipe\n");
lcore_config[i].state = WAIT;
@@ -655,7 +669,7 @@ static void rte_eal_init_alert(const char *msg)
ret = pthread_create(&lcore_config[i].thread_id, NULL,
eal_thread_loop, NULL);
if (ret != 0)
- rte_panic("Cannot create thread\n");
+ return rte_panic_return("Cannot create thread\n");
/* Set thread_name for aid in debugging. */
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
@@ -62,6 +62,20 @@ void __rte_panic(const char *funcname, const char *format, ...)
abort();
}
+int __rte_experimental
+__rte_panic_no_abort(const char *funcname, const char *format, ...)
+{
+ va_list ap;
+
+ rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+ rte_dump_stack();
+ rte_dump_registers();
+ return -1;
+}
+
/*
* Like rte_panic this terminates the application. However, no traceback is
* provided and no core-dump is generated.
@@ -52,7 +52,7 @@
while (n == 0 || (n < 0 && errno == EINTR))
n = write(m2s, &c, 1);
if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
+ return rte_panic_return("cannot write on configuration pipe\n");
/* wait ack */
do {
@@ -60,7 +60,7 @@
} while (n < 0 && errno == EINTR);
if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
+ return rte_panic_return("cannot read on configuration pipe\n");
return 0;
}
@@ -78,18 +78,19 @@
return rte_thread_set_affinity(&lcore_config[lcore_id].cpuset);
}
-void eal_thread_init_master(unsigned lcore_id)
+int eal_thread_init_master(unsigned int lcore_id)
{
/* set the lcore ID in per-lcore memory area */
RTE_PER_LCORE(_lcore_id) = lcore_id;
/* set CPU affinity */
if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
+ return rte_panic_return("cannot set affinity\n");
+ return 0;
}
/* main loop of threads */
-__attribute__((noreturn)) void *
+void *
eal_thread_loop(__attribute__((unused)) void *arg)
{
char c;
@@ -106,8 +107,10 @@ void eal_thread_init_master(unsigned lcore_id)
if (thread_id == lcore_config[lcore_id].thread_id)
break;
}
- if (lcore_id == RTE_MAX_LCORE)
- rte_panic("cannot retrieve lcore id\n");
+ if (lcore_id == RTE_MAX_LCORE) {
+ rte_panic_return("cannot retrieve lcore id\n");
+ return NULL;
+ }
m2s = lcore_config[lcore_id].pipe_master2slave[0];
s2m = lcore_config[lcore_id].pipe_slave2master[1];
@@ -116,8 +119,10 @@ void eal_thread_init_master(unsigned lcore_id)
RTE_PER_LCORE(_lcore_id) = lcore_id;
/* set CPU affinity */
- if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
+ if (eal_thread_set_affinity() < 0) {
+ rte_panic_return("cannot set affinity\n");
+ return NULL;
+ }
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -133,8 +138,10 @@ void eal_thread_init_master(unsigned lcore_id)
n = read(m2s, &c, 1);
} while (n < 0 && errno == EINTR);
- if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
+ if (n <= 0) {
+ rte_panic_return("cannot read on configuration pipe\n");
+ return NULL;
+ }
lcore_config[lcore_id].state = RUNNING;
@@ -142,11 +149,15 @@ void eal_thread_init_master(unsigned lcore_id)
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
n = write(s2m, &c, 1);
- if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
-
- if (lcore_config[lcore_id].f == NULL)
- rte_panic("NULL function pointer\n");
+ if (n < 0) {
+ rte_panic_return("cannot write on configuration pipe\n");
+ return NULL;
+ }
+
+ if (lcore_config[lcore_id].f == NULL) {
+ rte_panic_return("NULL function pointer\n");
+ return NULL;
+ }
/* call the function and store the return value */
fct_arg = lcore_config[lcore_id].arg;
@@ -314,7 +314,7 @@
if (addr == NULL)
ret = -EINVAL;
else if (mcfg->memzone_cnt == 0) {
- rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
+ return rte_panic_return("%s(): memzone address not NULL but memzone_cnt is 0!\n",
__func__);
} else {
memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
@@ -13,7 +13,7 @@
* @param arg
* opaque pointer
*/
-__attribute__((noreturn)) void *eal_thread_loop(void *arg);
+void *eal_thread_loop(void *arg);
/**
* Init per-lcore info for master thread
@@ -21,7 +21,7 @@
* @param lcore_id
* identifier of master lcore
*/
-void eal_thread_init_master(unsigned lcore_id);
+int eal_thread_init_master(unsigned int lcore_id);
/**
* Get the NUMA socket id from cpu id.
@@ -16,6 +16,7 @@
#include "rte_log.h"
#include "rte_branch_prediction.h"
+#include <rte_compat.h>
#ifdef __cplusplus
extern "C" {
@@ -50,6 +51,16 @@
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
+/**
+ * The following macros replace call sites that used to panic, allowing
+ * the error code to be rolled up to the caller so the application can
+ * exit in an orderly way instead of aborting.
+ */
+#define rte_panic_return_(func, format, ...) \
+ __rte_panic_no_abort(func, format "%.0s", __VA_ARGS__)
+#define rte_panic_return(...) rte_panic_return_(__func__, __VA_ARGS__, "dummy")
+
#ifdef RTE_ENABLE_ASSERT
#define RTE_ASSERT(exp) RTE_VERIFY(exp)
#else
@@ -75,6 +86,15 @@ void __rte_panic(const char *funcname , const char *format, ...)
__attribute__((noreturn))
__attribute__((format(printf, 2, 3)));
+int __rte_experimental
+__rte_panic_no_abort(const char *funcname, const char *format, ...)
+#ifdef __GNUC__
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
+ __attribute__((cold))
+#endif
+#endif
+ __attribute__((format(printf, 2, 3)));
+
#ifdef __cplusplus
}
#endif
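To make the intended call idioms concrete, here is a minimal standalone sketch. It mirrors the rte_panic_return() macros and the "%.0s"/"dummy" trick declared above, but substitutes a plain fprintf() logger for __rte_panic_no_abort() so it compiles without the EAL; it is an illustration of the pattern, not the DPDK implementation. Functions returning an int propagate the error directly; pointer-returning functions (and thread entry points) call the macro for its logging side effect and then return NULL themselves.

#include <stdarg.h>
#include <stdio.h>

/*
 * Standalone stand-in for __rte_panic_no_abort(): log the message and
 * report -1 instead of calling abort(). Uses vfprintf() rather than
 * rte_vlog() so the sketch builds on its own.
 */
static int panic_no_abort(const char *funcname, const char *format, ...)
{
	va_list ap;

	fprintf(stderr, "PANIC in %s():\n", funcname);
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
	return -1;
}

/* Same trick as rte_panic(): a trailing "dummy" argument consumed by
 * "%.0s" lets the macro accept a bare format string. */
#define panic_return_(func, format, ...) \
	panic_no_abort(func, format "%.0s", __VA_ARGS__)
#define panic_return(...) panic_return_(__func__, __VA_ARGS__, "dummy")

/* Idiom 1: an int-returning init path propagates the error directly. */
static int init_resource(const void *mem)
{
	if (mem == NULL)
		return panic_return("Cannot allocate resource\n");
	return 0;
}

/* Idiom 2: a pointer-returning path logs, then returns NULL itself. */
static void *alloc_object(void *mem)
{
	if (mem == NULL) {
		panic_return("Cannot allocate object\n");
		return NULL;
	}
	return mem;
}

int main(void)
{
	void *obj = alloc_object(NULL);		/* logs, returns NULL */

	if (obj == NULL || init_resource(obj) != 0)
		return 1;			/* error propagated, no abort() */
	return 0;
}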
@@ -134,8 +134,10 @@ void rte_free(void *addr)
return rte_malloc(NULL, size, align);
struct malloc_elem *elem = malloc_elem_from_data(ptr);
- if (elem == NULL)
- rte_panic("Fatal error: memory corruption detected\n");
+ if (elem == NULL) {
+ rte_panic_return("Fatal error: memory corruption detected\n");
+ return NULL;
+ }
size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
/* check alignment matches first, and if ok, see if we can resize block */
@@ -160,7 +160,7 @@ enum rte_iova_mode
* We also don't lock the whole file, so that in future we can use read-locks
* on other parts, e.g. memzones, to detect if there are running secondary
* processes. */
-static void
+static int
rte_eal_config_create(void)
{
void *rte_mem_cfg_addr;
@@ -169,7 +169,7 @@ enum rte_iova_mode
const char *pathname = eal_runtime_config_path();
if (internal_config.no_shconf)
- return;
+ return 0;
/* map the config before hugepage address so that we don't waste a page */
if (internal_config.base_virtaddr != 0)
@@ -181,28 +181,32 @@ enum rte_iova_mode
if (mem_cfg_fd < 0){
mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
- if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ if (mem_cfg_fd < 0) {
+ return rte_panic_return("Cannot open '%s' for rte_mem_config\n",
+ pathname);
+ }
}
retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
if (retval < 0){
close(mem_cfg_fd);
- rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
+ return rte_panic_return("Cannot resize '%s' for rte_mem_config\n",
+ pathname);
}
retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
if (retval < 0){
close(mem_cfg_fd);
- rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
- "process running?\n", pathname);
+ return rte_panic_return(
+ "Cannot create lock on '%s'. Is another primary process running?\n",
+ pathname);
}
rte_mem_cfg_addr = mmap(rte_mem_cfg_addr, sizeof(*rte_config.mem_config),
PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
if (rte_mem_cfg_addr == MAP_FAILED){
- rte_panic("Cannot mmap memory for rte_config\n");
+ return rte_panic_return("Cannot mmap memory for rte_config\n");
}
memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
rte_config.mem_config = rte_mem_cfg_addr;
@@ -211,10 +215,11 @@ enum rte_iova_mode
* processes could later map the config into this exact location */
rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
+ return 0;
}
/* attach to an existing shared memory config */
-static void
+static int
rte_eal_config_attach(void)
{
struct rte_mem_config *mem_config;
@@ -222,33 +227,37 @@ enum rte_iova_mode
const char *pathname = eal_runtime_config_path();
if (internal_config.no_shconf)
- return;
+ return 0;
if (mem_cfg_fd < 0){
mem_cfg_fd = open(pathname, O_RDWR);
- if (mem_cfg_fd < 0)
- rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+ if (mem_cfg_fd < 0) {
+ return rte_panic_return("Cannot open '%s' for rte_mem_config\n",
+ pathname);
+ }
}
/* map it as read-only first */
mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
if (mem_config == MAP_FAILED)
- rte_panic("Cannot mmap memory for rte_config! error %i (%s)\n",
+ return rte_panic_return("Cannot mmap memory for rte_config! error %i (%s)\n",
errno, strerror(errno));
rte_config.mem_config = mem_config;
+
+ return 0;
}
/* reattach the shared config at exact memory location primary process has it */
-static void
+static int
rte_eal_config_reattach(void)
{
struct rte_mem_config *mem_config;
void *rte_mem_cfg_addr;
if (internal_config.no_shconf)
- return;
+ return 0;
/* save the address primary process has mapped shared config to */
rte_mem_cfg_addr = (void *) (uintptr_t) rte_config.mem_config->mem_cfg_addr;
@@ -263,16 +272,18 @@ enum rte_iova_mode
if (mem_config == MAP_FAILED || mem_config != rte_mem_cfg_addr) {
if (mem_config != MAP_FAILED)
/* errno is stale, don't use */
- rte_panic("Cannot mmap memory for rte_config at [%p], got [%p]"
+ return rte_panic_return("Cannot mmap memory for rte_config at [%p], got [%p]"
" - please use '--base-virtaddr' option\n",
rte_mem_cfg_addr, mem_config);
else
- rte_panic("Cannot mmap memory for rte_config! error %i (%s)\n",
+ return rte_panic_return("Cannot mmap memory for rte_config! error %i (%s)\n",
errno, strerror(errno));
}
close(mem_cfg_fd);
rte_config.mem_config = mem_config;
+
+ return 0;
}
/* Detect if we are a primary or a secondary process */
@@ -296,24 +307,29 @@ enum rte_proc_type_t
}
/* Sets up rte_config structure with the pointer to shared memory config.*/
-static void
+static int
rte_config_init(void)
{
rte_config.process_type = internal_config.process_type;
switch (rte_config.process_type){
case RTE_PROC_PRIMARY:
- rte_eal_config_create();
+ if (rte_eal_config_create() != 0)
+ return -1;
break;
case RTE_PROC_SECONDARY:
- rte_eal_config_attach();
+ if (rte_eal_config_attach() != 0)
+ return -1;
rte_eal_mcfg_wait_complete(rte_config.mem_config);
- rte_eal_config_reattach();
+ if (rte_eal_config_reattach() != 0)
+ return -1;
break;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
- rte_panic("Invalid process type\n");
+ return rte_panic_return("Invalid process type\n");
}
+
+ return 0;
}
/* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
@@ -825,7 +841,8 @@ static void rte_eal_init_alert(const char *msg)
rte_srand(rte_rdtsc());
- rte_config_init();
+ if (rte_config_init() != 0)
+ return -1;
if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) {
rte_eal_init_alert("Cannot init logging.");
@@ -886,7 +903,8 @@ static void rte_eal_init_alert(const char *msg)
eal_check_mem_on_local_socket();
- eal_thread_init_master(rte_config.master_lcore);
+ if (eal_thread_init_master(rte_config.master_lcore) != 0)
+ return -1;
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -906,9 +924,9 @@ static void rte_eal_init_alert(const char *msg)
* and children
*/
if (pipe(lcore_config[i].pipe_master2slave) < 0)
- rte_panic("Cannot create pipe\n");
+ return rte_panic_return("Cannot create pipe\n");
if (pipe(lcore_config[i].pipe_slave2master) < 0)
- rte_panic("Cannot create pipe\n");
+ return rte_panic_return("Cannot create pipe\n");
lcore_config[i].state = WAIT;
@@ -916,7 +934,7 @@ static void rte_eal_init_alert(const char *msg)
ret = pthread_create(&lcore_config[i].thread_id, NULL,
eal_thread_loop, NULL);
if (ret != 0)
- rte_panic("Cannot create thread\n");
+ return rte_panic_return("Cannot create thread\n");
/* Set thread_name for aid in debugging. */
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
@@ -62,6 +62,20 @@ void __rte_panic(const char *funcname, const char *format, ...)
abort();
}
+int __rte_experimental
+__rte_panic_no_abort(const char *funcname, const char *format, ...)
+{
+ va_list ap;
+
+ rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+ va_start(ap, format);
+ rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+ va_end(ap);
+ rte_dump_stack();
+ rte_dump_registers();
+ return -1;
+}
+
/*
* Like rte_panic this terminates the application. However, no traceback is
* provided and no core-dump is generated.
@@ -80,8 +80,10 @@
unsigned long long size = 0;
FILE *fd = fopen(proc_meminfo, "r");
- if (fd == NULL)
- rte_panic("Cannot open %s\n", proc_meminfo);
+ if (fd == NULL) {
+ rte_panic_return("Cannot open %s\n", proc_meminfo);
+ return 0;
+ }
while(fgets(buffer, sizeof(buffer), fd)){
if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0){
size = rte_str_to_size(&buffer[hugepagesz_len]);
@@ -89,8 +91,11 @@
}
}
fclose(fd);
- if (size == 0)
- rte_panic("Cannot get default hugepage size from %s\n", proc_meminfo);
+ if (size == 0) {
+ rte_panic_return("Cannot get default hugepage size from %s\n",
+ proc_meminfo);
+ return 0;
+ }
return size;
}
@@ -116,8 +121,10 @@
char *retval = NULL;
FILE *fd = fopen(proc_mounts, "r");
- if (fd == NULL)
- rte_panic("Cannot open %s\n", proc_mounts);
+ if (fd == NULL) {
+ rte_panic_return("Cannot open %s\n", proc_mounts);
+ return NULL;
+ }
if (default_size == 0)
default_size = get_default_hp_size();
@@ -776,7 +776,7 @@ struct rte_intr_source {
* @return
* never return;
*/
-static __attribute__((noreturn)) void *
+static void *
eal_intr_thread_main(__rte_unused void *arg)
{
struct epoll_event ev;
@@ -794,8 +794,10 @@ static __attribute__((noreturn)) void *
/* create epoll fd */
int pfd = epoll_create(1);
- if (pfd < 0)
- rte_panic("Cannot create epoll instance\n");
+ if (pfd < 0) {
+ rte_panic_return("Cannot create epoll instance\n");
+ return NULL;
+ }
pipe_event.data.fd = intr_pipe.readfd;
/**
@@ -804,8 +806,9 @@ static __attribute__((noreturn)) void *
*/
if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
&pipe_event) < 0) {
- rte_panic("Error adding fd to %d epoll_ctl, %s\n",
+ rte_panic_return("Error adding fd to %d epoll_ctl, %s\n",
intr_pipe.readfd, strerror(errno));
+ return NULL;
}
numfds++;
@@ -823,8 +826,9 @@ static __attribute__((noreturn)) void *
*/
if (epoll_ctl(pfd, EPOLL_CTL_ADD,
src->intr_handle.fd, &ev) < 0){
- rte_panic("Error adding fd %d epoll_ctl, %s\n",
+ rte_panic_return("Error adding fd %d epoll_ctl, %s\n",
src->intr_handle.fd, strerror(errno));
+ return NULL;
}
else
numfds++;
@@ -839,6 +843,8 @@ static __attribute__((noreturn)) void *
*/
close(pfd);
}
+
+ return NULL;
}
int
@@ -52,7 +52,7 @@
while (n == 0 || (n < 0 && errno == EINTR))
n = write(m2s, &c, 1);
if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
+ return rte_panic_return("cannot write on configuration pipe\n");
/* wait ack */
do {
@@ -60,7 +60,7 @@
} while (n < 0 && errno == EINTR);
if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
+ return rte_panic_return("cannot read on configuration pipe\n");
return 0;
}
@@ -78,18 +78,19 @@
return rte_thread_set_affinity(&lcore_config[lcore_id].cpuset);
}
-void eal_thread_init_master(unsigned lcore_id)
+int eal_thread_init_master(unsigned int lcore_id)
{
/* set the lcore ID in per-lcore memory area */
RTE_PER_LCORE(_lcore_id) = lcore_id;
/* set CPU affinity */
if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
+ return rte_panic_return("cannot set affinity\n");
+ return 0;
}
/* main loop of threads */
-__attribute__((noreturn)) void *
+void *
eal_thread_loop(__attribute__((unused)) void *arg)
{
char c;
@@ -106,8 +107,10 @@ void eal_thread_init_master(unsigned lcore_id)
if (thread_id == lcore_config[lcore_id].thread_id)
break;
}
- if (lcore_id == RTE_MAX_LCORE)
- rte_panic("cannot retrieve lcore id\n");
+ if (lcore_id == RTE_MAX_LCORE) {
+ rte_panic_return("cannot retrieve lcore id\n");
+ return NULL;
+ }
m2s = lcore_config[lcore_id].pipe_master2slave[0];
s2m = lcore_config[lcore_id].pipe_slave2master[1];
@@ -116,8 +119,10 @@ void eal_thread_init_master(unsigned lcore_id)
RTE_PER_LCORE(_lcore_id) = lcore_id;
/* set CPU affinity */
- if (eal_thread_set_affinity() < 0)
- rte_panic("cannot set affinity\n");
+ if (eal_thread_set_affinity() < 0) {
+ rte_panic_return("cannot set affinity\n");
+ return NULL;
+ }
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -133,8 +138,10 @@ void eal_thread_init_master(unsigned lcore_id)
n = read(m2s, &c, 1);
} while (n < 0 && errno == EINTR);
- if (n <= 0)
- rte_panic("cannot read on configuration pipe\n");
+ if (n <= 0) {
+ rte_panic_return("cannot read on configuration pipe\n");
+ return NULL;
+ }
lcore_config[lcore_id].state = RUNNING;
@@ -142,11 +149,15 @@ void eal_thread_init_master(unsigned lcore_id)
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
n = write(s2m, &c, 1);
- if (n < 0)
- rte_panic("cannot write on configuration pipe\n");
-
- if (lcore_config[lcore_id].f == NULL)
- rte_panic("NULL function pointer\n");
+ if (n < 0) {
+ rte_panic_return("cannot write on configuration pipe\n");
+ return NULL;
+ }
+
+ if (lcore_config[lcore_id].f == NULL) {
+ rte_panic_return("NULL function pointer\n");
+ return NULL;
+ }
/* call the function and store the return value */
fct_arg = lcore_config[lcore_id].arg;
@@ -214,6 +214,7 @@ DPDK_18.02 {
EXPERIMENTAL {
global:
+ __rte_panic_no_abort;
rte_eal_cleanup;
rte_eal_devargs_insert;
rte_eal_devargs_parse;
@@ -190,7 +190,7 @@ enum {
return port_id;
}
-static void
+static int
rte_eth_dev_shared_data_prepare(void)
{
const unsigned flags = 0;
@@ -206,8 +206,10 @@ enum {
rte_socket_id(), flags);
} else
mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
- if (mz == NULL)
- rte_panic("Cannot allocate ethdev shared data\n");
+ if (mz == NULL) {
+ rte_spinlock_unlock(&rte_eth_shared_data_lock);
+ return rte_panic_return("Cannot allocate ethdev shared data\n");
+ }
rte_eth_dev_shared_data = mz->addr;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -220,6 +222,8 @@ enum {
}
rte_spinlock_unlock(&rte_eth_shared_data_lock);
+
+ return 0;
}
struct rte_eth_dev *
@@ -67,7 +67,7 @@
rte_socket_id());
if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
+ return rte_panic_return("Cannot allocate memzone for private "
"device data");
}
@@ -61,9 +61,11 @@
RTE_CACHE_LINE_SIZE,
socket_id);
- if (eventdev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private device"
+ if (eventdev->data->dev_private == NULL) {
+ return rte_panic_return("Cannot allocate memzone for private device"
" data");
+ return NULL;
+ }
}
return eventdev;
@@ -353,37 +353,43 @@ struct rte_kni *
/* TX RING */
mz = slot->m_tx_q;
ctx->tx_q = mz->addr;
- kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.tx_phys = mz->phys_addr;
/* RX RING */
mz = slot->m_rx_q;
ctx->rx_q = mz->addr;
- kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.rx_phys = mz->phys_addr;
/* ALLOC RING */
mz = slot->m_alloc_q;
ctx->alloc_q = mz->addr;
- kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.alloc_phys = mz->phys_addr;
/* FREE RING */
mz = slot->m_free_q;
ctx->free_q = mz->addr;
- kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.free_phys = mz->phys_addr;
/* Request RING */
mz = slot->m_req_q;
ctx->req_q = mz->addr;
- kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.req_phys = mz->phys_addr;
/* Response RING */
mz = slot->m_resp_q;
ctx->resp_q = mz->addr;
- kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
+ if (kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX))
+ return NULL;
dev_info.resp_phys = mz->phys_addr;
/* Req/Resp sync mem area */
@@ -7,17 +7,19 @@
/**
* Initializes the kni fifo structure
*/
-static void
+static int
kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
{
/* Ensure size is power of 2 */
if (size & (size - 1))
- rte_panic("KNI fifo size must be power of 2\n");
+ return rte_panic_return("KNI fifo size must be power of 2\n");
fifo->write = 0;
fifo->read = 0;
fifo->len = size;
fifo->elem_size = sizeof(void *);
+
+ return 0;
}
/**