@@ -461,7 +461,7 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)
if (test_data->perf_comp_force_stop) {
RTE_LOG(ERR, USER1,
- "lcore: %d Perf. test has been aborted by user\n",
+ "lcore: %d Perf. test has been canceled by user\n",
mem->lcore_id);
res = -1;
}
@@ -323,7 +323,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type)
if (test_data->perf_comp_force_stop) {
RTE_LOG(ERR, USER1,
- "lcore: %d Perf. test has been aborted by user\n",
+ "lcore: %d Perf. test has been canceled by user\n",
mem->lcore_id);
res = -1;
}
@@ -382,7 +382,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)
if (test_data->perf_comp_force_stop) {
RTE_LOG(ERR, USER1,
- "lcore: %d Perf. test has been aborted by user\n",
+ "lcore: %d Perf. test has been canceled by user\n",
mem->lcore_id);
res = -1;
}
@@ -57,7 +57,7 @@ runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), i
if (stats.completed != stats.submitted)
ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
if (check_err_stats && stats.errors != 0)
- ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
+ ERR_RETURN("\nErrors reported during op processing, stopping tests\n");
}
printf("\n");
return 0;
@@ -40,7 +40,7 @@ packet. Registers ``R1-R5`` are scratch registers
and must not be used to store the data across these instructions.
These instructions have implicit program exit condition as well. When
eBPF program is trying to access the data beyond the packet boundary,
-the interpreter will abort the execution of the program. JIT compilers
+the interpreter will cancel the execution of the program. JIT compilers
therefore must preserve this property. ``src_reg`` and ``imm32`` fields are
explicit inputs to these instructions.
For example, ``(BPF_IND | BPF_W | BPF_LD)`` means:
@@ -214,7 +214,7 @@ typically performs the following steps:
with immediate effect once the port is started.
This function fails when the currently configured hierarchy is not supported by
-the Ethernet port, in which case the user can abort or try out another
+the Ethernet port, in which case the user can retry with another
hierarchy configuration (e.g. a hierarchy with less leaf nodes), which can be
built from scratch or by modifying the existing hierarchy configuration. Note
that this function can still fail due to other causes (e.g. not enough memory
@@ -753,8 +753,8 @@ hisi_dma_convert_status(uint16_t status)
return RTE_DMA_STATUS_INVALID_OPCODE;
case HISI_DMA_STATUS_INVALID_LENGTH:
return RTE_DMA_STATUS_INVALID_LENGTH;
- case HISI_DMA_STATUS_USER_ABORT:
- return RTE_DMA_STATUS_USER_ABORT;
+ case HISI_DMA_STATUS_USER_CANCEL:
+ return RTE_DMA_STATUS_USER_CANCEL;
case HISI_DMA_STATUS_REMOTE_READ_ERROR:
case HISI_DMA_STATUS_AXI_READ_ERROR:
return RTE_DMA_STATUS_BUS_READ_ERROR;
@@ -152,7 +152,7 @@ enum {
#define HISI_DMA_STATUS_SUCCESS 0x0
#define HISI_DMA_STATUS_INVALID_OPCODE 0x1
#define HISI_DMA_STATUS_INVALID_LENGTH 0x2
-#define HISI_DMA_STATUS_USER_ABORT 0x4
+#define HISI_DMA_STATUS_USER_CANCEL 0x4
#define HISI_DMA_STATUS_REMOTE_READ_ERROR 0x10
#define HISI_DMA_STATUS_AXI_READ_ERROR 0x20
#define HISI_DMA_STATUS_AXI_WRITE_ERROR 0x40
@@ -776,7 +776,7 @@ eth_monitor_callback(const uint64_t value,
const uint64_t v = opaque[CLB_VAL_IDX];
const uint64_t m = (uint32_t)~0;
- /* if the value has changed, abort entering power optimized state */
+ /* if the value has changed, cancel entering power optimized state */
return (value & m) == v ? 0 : -1;
}
@@ -50,7 +50,7 @@ parse_rt_tokens(char **tokens, uint32_t n_tokens,
route_ipv4 = &rt_ip4[*n_rts];
APP_CHECK(*n_rts <= RT_IPV4_MAX_RULES - 1, status,
- "too many rt rules, abort insertion\n");
+ "too many rt rules, cancel insertion\n");
if (status->status < 0)
return;
@@ -59,7 +59,7 @@ parse_rt_tokens(char **tokens, uint32_t n_tokens,
route_ipv6 = &rt_ip6[*n_rts];
APP_CHECK(*n_rts <= RT_IPV6_MAX_RULES - 1, status,
- "too many rt rules, abort insertion\n");
+ "too many rt rules, cancel insertion\n");
if (status->status < 0)
return;
} else {
@@ -728,13 +728,13 @@ int rte_dma_dump(int16_t dev_id, FILE *f);
enum rte_dma_status_code {
/** The operation completed successfully. */
RTE_DMA_STATUS_SUCCESSFUL,
- /** The operation failed to complete due abort by user.
+ /** The operation failed to complete due to being canceled by the user.
* This is mainly used when processing dev_stop, user could modify the
- * descriptors (e.g. change one bit to tell hardware abort this job),
+ * descriptors (e.g. change one bit to tell hardware to cancel this job),
* it allows outstanding requests to be complete as much as possible,
* so reduce the time to stop the device.
*/
- RTE_DMA_STATUS_USER_ABORT,
+ RTE_DMA_STATUS_USER_CANCEL,
/** The operation failed to complete due to following scenarios:
* The jobs in a particular batch are not attempted because they
* appeared after a fence where a previous job failed. In some HW
@@ -24,7 +24,7 @@
/**
* Callback definition for monitoring conditions. Callbacks with this signature
* will be used by `rte_power_monitor()` to check if the entering of power
- * optimized state should be aborted.
+ * optimized state should not continue.
*
* @param val
* The value read from memory.
@@ -33,7 +33,7 @@
*
* @return
* 0 if entering of power optimized state should proceed
- * -1 if entering of power optimized state should be aborted
+ * -1 if entering of power optimized state should not continue
*/
typedef int (*rte_power_monitor_clb_t)(const uint64_t val,
const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ]);
@@ -47,7 +47,7 @@ struct rte_power_monitor_cond {
*/
rte_power_monitor_clb_t fn; /**< Callback to be used to check if
* entering power optimized state should
- * be aborted.
+ * not proceed.
*/
uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ];
/**< Callback-specific data */
@@ -66,7 +66,7 @@ struct rte_power_monitor_cond {
* size (`pmc->size`) are provided in the `pmc` power monitoring condition. If
* the mask is non-zero, the current value pointed to by the `pmc->addr` pointer
* will be read and compared against the expected value, and if they match, the
- * entering of optimized power state will be aborted. This is intended to
+ * entering of optimized power state will be canceled. This is intended to
* prevent the CPU from entering optimized power state and waiting on a write
* that has already happened by the time this API is called.
*
@@ -141,7 +141,7 @@ int rte_power_pause(const uint64_t tsc_timestamp);
* Additionally, `expected` 64-bit values and 64-bit masks are provided. If
* mask is non-zero, the current value pointed to by the `p` pointer will be
* checked against the expected value, and if they do not match, the entering of
- * optimized power state may be aborted.
+ * optimized power state may be canceled.
*
* @warning It is responsibility of the user to check if this function is
* supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
@@ -258,7 +258,7 @@ rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
* fails or not available take a read lock
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -285,7 +285,7 @@ rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
* fails or not available take a write lock
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -144,7 +144,7 @@ static inline int rte_tm_supported(void);
* if it fails or not available take the spinlock.
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -172,7 +172,7 @@ rte_spinlock_unlock_tm(rte_spinlock_t *sl)
* if it fails or not available try to take the lock.
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -277,7 +277,7 @@ static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
* if it fails or not available take the recursive spinlocks
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -303,7 +303,7 @@ static inline void rte_spinlock_recursive_unlock_tm(
* if it fails or not available try to take the recursive lock
*
* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
- * transaction always aborts the transaction since the CPU is not able to
+ * transaction always cancels the transaction since the CPU is not able to
* roll-back should the transaction fail. Therefore, hardware transactional
* locks are not advised to be used around rte_eth_rx_burst() and
* rte_eth_tx_burst() calls.
@@ -60,7 +60,7 @@ extern "C" {
* // Loads may be atomic or non-atomic, as in this example.
* *param_x = config->param_x;
* strcpy(param_y, config->param_y);
- * // An alternative to an immediate retry is to abort and
+ * // An alternative to an immediate retry is to quit and
* // try again at some later time, assuming progress is
* // possible without the data.
* } while (rte_seqlock_read_retry(&config->lock, sn));
@@ -17,13 +17,13 @@ extern "C" {
#define RTE_XBEGIN_STARTED (~0u)
-#define RTE_XABORT_EXPLICIT (1 << 0)
-#define RTE_XABORT_RETRY (1 << 1)
-#define RTE_XABORT_CONFLICT (1 << 2)
-#define RTE_XABORT_CAPACITY (1 << 3)
-#define RTE_XABORT_DEBUG (1 << 4)
-#define RTE_XABORT_NESTED (1 << 5)
-#define RTE_XABORT_CODE(x) (((x) >> 24) & 0xff)
+#define RTE_XCANCEL_EXPLICIT (1 << 0)
+#define RTE_XCANCEL_RETRY (1 << 1)
+#define RTE_XCANCEL_CONFLICT (1 << 2)
+#define RTE_XCANCEL_CAPACITY (1 << 3)
+#define RTE_XCANCEL_DEBUG (1 << 4)
+#define RTE_XCANCEL_NESTED (1 << 5)
+#define RTE_XCANCEL_CODE(x) (((x) >> 24) & 0xff)
static __rte_always_inline
unsigned int rte_xbegin(void)
@@ -41,7 +41,7 @@ void rte_xend(void)
}
/* not an inline function to workaround a clang bug with -O0 */
-#define rte_xabort(status) do { \
+#define rte_xcancel(status) do { \
asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
} while (0)
@@ -18,7 +18,7 @@ extern "C" {
#include "rte_cycles.h"
#define RTE_RTM_MAX_RETRIES (20)
-#define RTE_XABORT_LOCK_BUSY (0xff)
+#define RTE_XCANCEL_LOCK_BUSY (0xff)
#ifndef RTE_FORCE_INTRINSICS
static inline void
@@ -93,16 +93,16 @@ rte_try_tm(volatile int *lock)
if (likely(RTE_XBEGIN_STARTED == status)) {
if (unlikely(*lock))
- rte_xabort(RTE_XABORT_LOCK_BUSY);
+ rte_xcancel(RTE_XCANCEL_LOCK_BUSY);
else
return 1;
}
while (*lock)
rte_pause();
- if ((status & RTE_XABORT_CONFLICT) ||
- ((status & RTE_XABORT_EXPLICIT) &&
- (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
+ if ((status & RTE_XCANCEL_CONFLICT) ||
+ ((status & RTE_XCANCEL_EXPLICIT) &&
+ (RTE_XCANCEL_CODE(status) == RTE_XCANCEL_LOCK_BUSY))) {
/* add a small delay before retrying, basing the
* delay on the number of times we've already tried,
* to give a back-off type of behaviour. We
@@ -116,7 +116,7 @@ rte_try_tm(volatile int *lock)
continue;
}
- if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
+ if ((status & RTE_XCANCEL_RETRY) == 0) /* do not retry */
break;
}
return 0;
@@ -118,7 +118,7 @@ rte_power_monitor(const struct rte_power_monitor_cond *pmc,
cur_value = __get_umwait_val(pmc->addr, pmc->size);
- /* check if callback indicates we should abort */
+ /* check if callback indicates we should not proceed */
if (pmc->fn(cur_value, pmc->opaque) != 0)
goto end;
@@ -242,7 +242,7 @@ rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
/* start new transaction region */
rc = rte_xbegin();
- /* transaction abort, possible write to one of wait addresses */
+ /* transaction canceled, possible write to one of wait addresses */
if (rc != RTE_XBEGIN_STARTED)
return 0;
@@ -251,7 +251,7 @@ rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
* the read set. This means that when we trigger a wakeup from another
* thread, even if we don't have a defined wakeup address and thus don't
* actually cause any writes, the act of locking our lock will itself
- * trigger the wakeup and abort the transaction.
+ * trigger the wakeup and cancel the transaction.
*/
rte_spinlock_is_locked(&s->lock);
@@ -271,7 +271,7 @@ rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
const uint64_t val = __get_umwait_val(c->addr, c->size);
- /* abort if callback indicates that we need to stop */
+ /* cancel if callback indicates that we need to stop */
if (c->fn(val, c->opaque) != 0)
break;
}
@@ -1710,7 +1710,7 @@ rte_tm_node_resume(uint16_t port_id,
* the current port, with immediate effect once the port is started.
*
* This function fails when the currently configured hierarchy is not supported
- * by the Ethernet port, in which case the user can abort or try out another
+ * by the Ethernet port, in which case the user can try out another
* hierarchy configuration (e.g. a hierarchy with less leaf nodes), which can be
* build from scratch (when *clear_on_fail* is enabled) or by modifying the
* existing hierarchy configuration (when *clear_on_fail* is disabled).
@@ -124,7 +124,7 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
#define RTE_HASH_BFS_QUEUE_MAX_LEN 1000
-#define RTE_XABORT_CUCKOO_PATH_INVALIDED 0x4
+#define RTE_CANCEL_CUCKOO_PATH_INVALIDED 0x4
#define RTE_HASH_TSX_MAX_RETRY 10
@@ -23,7 +23,7 @@
* This function should be used only for single thread producer/consumer.
* Check that user didn't request to move tail above the head.
* In that situation:
- * - return zero, that will cause abort any pending changes and
+ * - return zero, which will result in canceling any pending changes and
* return head to its previous position.
* - throw an assert in debug mode.
*/
@@ -67,7 +67,7 @@ __rte_ring_st_set_head_tail(struct rte_ring_headtail *ht, uint32_t tail,
* This function should be used only for producer/consumer in MT_HTS mode.
* Check that user didn't request to move tail above the head.
* In that situation:
- * - return zero, that will cause abort any pending changes and
+ * - return zero, which will result in canceling any pending changes and
* return head to its previous position.
* - throw an assert in debug mode.
*/