@@ -630,7 +630,7 @@ metrics_display(int port_id)
names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
if (names == NULL) {
- printf("Cannot allocate memory for metrcis names\n");
+ printf("Cannot allocate memory for metrics names\n");
rte_free(metrics);
return;
}
@@ -1109,7 +1109,7 @@ show_tm(void)
caplevel.n_nodes_max,
caplevel.n_nodes_nonleaf_max,
caplevel.n_nodes_leaf_max);
- printf("\t -- indetical: non leaf %u leaf %u\n",
+ printf("\t -- identical: non leaf %u leaf %u\n",
caplevel.non_leaf_nodes_identical,
caplevel.leaf_nodes_identical);
@@ -1263,7 +1263,7 @@ show_ring(char *name)
printf(" - Name (%s) on socket (%d)\n"
" - flags:\n"
"\t -- Single Producer Enqueue (%u)\n"
- "\t -- Single Consmer Dequeue (%u)\n",
+ "\t -- Single Consumer Dequeue (%u)\n",
ptr->name,
ptr->memzone->socket_id,
ptr->flags & RING_F_SP_ENQ,
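The flags dumped here are the ones given at ring creation. A minimal sketch, assuming an arbitrary name and size, of creating a ring this command would report as single-producer/single-consumer:

```c
#include <rte_ring.h>
#include <rte_lcore.h>

/* Hedged sketch: a ring created with these flags is what the dump
 * above reports as Single Producer Enqueue (1) and Single Consumer
 * Dequeue (1). Name and size are arbitrary; the count must be a
 * power of two. */
static struct rte_ring *
example_sp_sc_ring(void)
{
    return rte_ring_create("example_ring", 1024, rte_socket_id(),
                           RING_F_SP_ENQ | RING_F_SC_DEQ);
}
```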
@@ -386,8 +386,8 @@ parse_cb_ipv4_trace(char *str, struct ipv4_5tuple *v)
}
/*
- * Parses IPV6 address, exepcts the following format:
- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
+ * Parses IPV6 address, expects the following format:
+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexadecimal digit).
*/
static int
parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
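For context, a hedged sketch of consuming one fixed-width group of the format the corrected comment describes; this is an illustration, not the application's parse_ipv6_addr() logic:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hedged sketch (not the application's parser): read one "XXXX" group
 * of the fixed-width format above and step over a trailing ':'.
 * Groups wider than 16 bits are rejected by the UINT16_MAX check. */
static int
parse_hextet(const char **in, uint16_t *out)
{
    char *end;
    unsigned long v = strtoul(*in, &end, 16);

    if (end == *in || v > UINT16_MAX)
        return -1; /* not a valid hexadecimal group */
    *out = (uint16_t)v;
    *in = (*end == ':') ? end + 1 : end;
    return 0;
}
```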
@@ -994,7 +994,7 @@ print_usage(const char *prgname)
"should be either 1 or multiple of %zu, "
"but not greater then %u]\n"
"[--" OPT_MAX_SIZE
- "=<size limit (in bytes) for runtime ACL strucutures> "
+ "=<size limit (in bytes) for runtime ACL structures> "
"leave 0 for default behaviour]\n"
"[--" OPT_ITER_NUM "=<number of iterations to perform>]\n"
"[--" OPT_VERBOSE "=<verbose level>]\n"
@@ -180,7 +180,7 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
@@ -72,7 +72,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type)
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
@@ -75,7 +75,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
@@ -67,7 +67,7 @@ comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)
uint64_t comp_flags = cap->comp_feature_flags;
- /* Huffman enconding */
+ /* Huffman encoding */
if (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&
(comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {
RTE_LOG(ERR, USER1,
@@ -334,7 +334,7 @@ pmd_cyclecount_bench_burst_sz(
* queue, so we never get any failed enqs unless the driver won't accept
* the exact number of descriptors we requested, or the driver won't
* wrap around the end of the TX ring. However, since we're only
- * dequeueing once we've filled up the queue, we have to benchmark it
+ * dequeuing once we've filled up the queue, we have to benchmark it
* piecemeal and then average out the results.
*/
cur_op = 0;
@@ -336,7 +336,7 @@ usage(char *program)
"\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
- "\t expity_nsec would be the timeout\n"
+ "\t expiry_nsec would be the timeout\n"
"\t in ns.\n"
"\t--prod_type_timerdev_burst : use timer device as producer\n"
"\t burst mode.\n"
@@ -253,7 +253,7 @@ void
order_opt_dump(struct evt_options *opt)
{
evt_dump_producer_lcores(opt);
- evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
evt_dump_worker_lcores(opt);
evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}
@@ -624,7 +624,7 @@ print_usage(void)
"(if -f is not specified)>]\n"
"[-r <percentage ratio of random ip's to lookup"
"(if -t is not specified)>]\n"
- "[-c <do comarison with LPM library>]\n"
+ "[-c <do comparison with LPM library>]\n"
"[-6 <do tests with ipv6 (default ipv4)>]\n"
"[-s <shuffle randomly generated routes>]\n"
"[-a <check nexthops for all ipv4 address space"
@@ -641,7 +641,7 @@ print_usage(void)
"[-g <number of tbl8's for dir24_8 or trie FIBs>]\n"
"[-w <path to the file to dump routing table>]\n"
"[-u <path to the file to dump ip's for lookup>]\n"
- "[-v <type of loookup function:"
+ "[-v <type of lookup function:"
"\ts1, s2, s3 (3 types of scalar), v (vector) -"
" for DIR24_8 based FIB\n"
"\ts, v - for TRIE based ipv6 FIB>]\n",
@@ -28,7 +28,7 @@
#define PORT_ID_DST 1
#define TEID_VALUE 1
-/* Flow items/acctions max size */
+/* Flow items/actions max size */
#define MAX_ITEMS_NUM 32
#define MAX_ACTIONS_NUM 32
#define MAX_ATTRS_NUM 16
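These caps bound the scratch arrays used to assemble one rule. A hedged sketch of the usual pattern, with END-terminated item/action arrays (helper name hypothetical):

```c
#include <string.h>
#include <rte_flow.h>

/* Hedged sketch (helper hypothetical): scratch arrays bounded by the
 * caps above, terminated with END entries as rte_flow expects. */
static void
init_rule_arrays(struct rte_flow_item items[MAX_ITEMS_NUM],
                 struct rte_flow_action actions[MAX_ACTIONS_NUM])
{
    memset(items, 0, sizeof(struct rte_flow_item) * MAX_ITEMS_NUM);
    memset(actions, 0, sizeof(struct rte_flow_action) * MAX_ACTIONS_NUM);
    items[0].type = RTE_FLOW_ITEM_TYPE_END;
    actions[0].type = RTE_FLOW_ACTION_TYPE_END;
}
```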
@@ -1519,7 +1519,7 @@ dump_used_cpu_time(const char *item,
* threads time.
*
* Throughput: total count of rte rules divided
- * over the average of the time cosumed by all
+ * over the average of the time consumed by all
* threads time.
*/
double insertion_latency_time;
@@ -561,7 +561,7 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set the option to enable display of RX and TX bursts.\n"
"set port (port_id) vf (vf_id) rx|tx on|off\n"
- " Enable/Disable a VF receive/tranmit from a port\n\n"
+ " Enable/Disable a VF receive/transmit from a port\n\n"
"set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM"
"|MPE) (on|off)\n"
@@ -2162,7 +2162,7 @@ static const struct token token_list[] = {
},
[COMMON_POLICY_ID] = {
.name = "{policy_id}",
- .type = "POLCIY_ID",
+ .type = "POLICY_ID",
.help = "policy id",
.call = parse_int,
.comp = comp_none,
@@ -2370,7 +2370,7 @@ static const struct token token_list[] = {
},
[TUNNEL_DESTROY] = {
.name = "destroy",
- .help = "destroy tunel",
+ .help = "destroy tunnel",
.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
NEXT_ENTRY(COMMON_PORT_ID)),
.args = ARGS(ARGS_ENTRY(struct buffer, port)),
@@ -2378,7 +2378,7 @@ static const struct token token_list[] = {
},
[TUNNEL_DESTROY_ID] = {
.name = "id",
- .help = "tunnel identifier to testroy",
+ .help = "tunnel identifier to destroy",
.next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)),
.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
.call = parse_tunnel,
@@ -69,7 +69,7 @@ print_err_msg(struct rte_tm_error *error)
[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]
= "num shared shapers field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]
- = "wfq weght mode field (node params)",
+ = "wfq weight mode field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]
= "num strict priorities field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]
@@ -479,7 +479,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_show_port_tm_level_cap = {
.f = cmd_show_port_tm_level_cap_parsed,
.data = NULL,
- .help_str = "Show Port TM Hierarhical level Capabilities",
+ .help_str = "Show Port TM Hierarchical level Capabilities",
.tokens = {
(void *)&cmd_show_port_tm_level_cap_show,
(void *)&cmd_show_port_tm_level_cap_port,
@@ -796,7 +796,7 @@ pkt_copy_split(const struct rte_mbuf *pkt)
*
* The testpmd command line for this forward engine sets the flags
* TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
- * wether a checksum must be calculated in software or in hardware. The
+ * whether a checksum must be calculated in software or in hardware. The
* IP, UDP, TCP and SCTP flags always concern the inner layer. The
* OUTER_IP is only useful for tunnel packets.
*/
@@ -110,7 +110,7 @@ usage(char* progname)
"If the drop-queue doesn't exist, the packet is dropped. "
"By default drop-queue=127.\n");
#ifdef RTE_LIB_LATENCYSTATS
- printf(" --latencystats=N: enable latency and jitter statistcs "
+ printf(" --latencystats=N: enable latency and jitter statistics "
"monitoring on forwarding lcore id N.\n");
#endif
printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
@@ -449,7 +449,7 @@ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
uint8_t latencystats_enabled;
/*
- * Lcore ID to serive latency statistics.
+ * Lcore ID to service latency statistics.
*/
lcoreid_t latencystats_lcore_id = -1;
@@ -174,14 +174,14 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
sizeof(struct rte_udp_hdr)));
- /* updata udp pkt length */
+ /* update udp pkt length */
udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr));
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
- /* updata ip pkt length and csum */
+ /* update ip pkt length and csum */
ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ip_hdr->hdr_checksum = 0;
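After the zeroing above, the checksum is presumably recomputed; a hedged sketch of the typical follow-up using the librte_net helper, with the IPv4 total length derived from the UDP length already computed:

```c
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hedged sketch of the step that typically follows the zeroing above:
 * set the new IPv4 total length (header plus the UDP datagram length),
 * then recompute the header checksum in software. rte_cpu_to_be_16()
 * is the public equivalent of the RTE_CPU_TO_BE_16 wrapper used above. */
static void
finish_ipv4_update(struct rte_ipv4_hdr *ip_hdr, uint16_t udp_len)
{
    ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(udp_len +
                           sizeof(struct rte_ipv4_hdr)));
    ip_hdr->hdr_checksum = 0; /* rte_ipv4_cksum() expects a zeroed field */
    ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
}
```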
@@ -11,7 +11,7 @@
* (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)
* for two execution units to make sure that rte_smp_mb() prevents
* store-load reordering to happen.
- * Also when executed on a single lcore could be used as a approxiamate
+ * Also when executed on a single lcore could be used as an approximate
* estimation of number of cycles particular implementation of rte_smp_mb()
* will take.
*/
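A hedged sketch of the store-load case this test exercises; names are illustrative, and only the barrier placement reflects the comment above:

```c
#include <rte_atomic.h>

/* Hedged sketch (names illustrative): each side publishes its flag,
 * issues the full barrier, then reads the peer's flag. Without
 * rte_smp_mb() the CPU may hoist the load above the store, letting
 * both sides read 0. */
static volatile int flag[2];

static int
barrier_side(void *arg)
{
    int self = *(const int *)arg; /* 0 or 1 */

    flag[self] = 1;
    rte_smp_mb(); /* the store-load fence under test */
    return flag[1 - self]; /* 0 on both sides would mean reordering */
}
```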
@@ -23,7 +23,7 @@
/*
* Basic functional tests for librte_bpf.
* The main procedure - load eBPF program, execute it and
- * compare restuls with expected values.
+ * compare results with expected values.
*/
struct dummy_offset {
@@ -2707,7 +2707,7 @@ test_ld_mbuf1_check(uint64_t rc, const void *arg)
}
/*
- * same as ld_mbuf1, but then trancate the mbuf by 1B,
+ * same as ld_mbuf1, but then truncate the mbuf by 1B,
* so load of last 4B fail.
*/
static void
@@ -1256,7 +1256,7 @@ test_deflate_comp_run(const struct interim_data_params *int_data,
/*
* Store original operation index in private data,
* since ordering does not have to be maintained,
- * when dequeueing from compressdev, so a comparison
+ * when dequeuing from compressdev, so a comparison
* at the end of the test can be done.
*/
priv_data = (struct priv_op_data *) (ops[i] + 1);
@@ -6870,7 +6870,7 @@ test_snow3g_decryption_with_digest_test_case_1(void)
}
/*
- * Function prepare data for hash veryfication test case.
+ * Function prepares data for hash verification test case.
* Digest is allocated in 4 last bytes in plaintext, pattern.
*/
snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);
@@ -346,7 +346,7 @@ test_fib_perf(void)
fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config);
TEST_FIB_ASSERT(fib != NULL);
- /* Measue add. */
+ /* Measure add. */
begin = rte_rdtsc();
for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
@@ -326,7 +326,7 @@ test_kni_register_handler_mp(void)
/* Check with the invalid parameters */
if (rte_kni_register_handlers(kni, NULL) == 0) {
- printf("Unexpectedly register successuflly "
+ printf("Unexpectedly register successfully "
"with NULL ops pointer\n");
exit(-1);
}
@@ -475,7 +475,7 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
/**
* Check multiple processes support on
- * registerring/unregisterring handlers.
+ * registering/unregistering handlers.
*/
if (test_kni_register_handler_mp() < 0) {
printf("fail to check multiple process support\n");
@@ -11,7 +11,7 @@
#include "test.h"
-/* incrementd in handler, to check it is properly called once per
+/* incremented in handler, to check it is properly called once per
* key/value association */
static unsigned count;
@@ -107,14 +107,14 @@ static int test_valid_kvargs(void)
goto fail;
}
count = 0;
- /* call check_handler() for all entries with key="unexistant_key" */
- if (rte_kvargs_process(kvlist, "unexistant_key", check_handler, NULL) < 0) {
+ /* call check_handler() for all entries with key="nonexistent_key" */
+ if (rte_kvargs_process(kvlist, "nonexistent_key", check_handler, NULL) < 0) {
printf("rte_kvargs_process() error\n");
rte_kvargs_free(kvlist);
goto fail;
}
if (count != 0) {
- printf("invalid count value %d after rte_kvargs_process(unexistant_key)\n",
+ printf("invalid count value %d after rte_kvargs_process(nonexistent_key)\n",
count);
rte_kvargs_free(kvlist);
goto fail;
@@ -135,10 +135,10 @@ static int test_valid_kvargs(void)
rte_kvargs_free(kvlist);
goto fail;
}
- /* count all entries with key="unexistant_key" */
- count = rte_kvargs_count(kvlist, "unexistant_key");
+ /* count all entries with key="nonexistent_key" */
+ count = rte_kvargs_count(kvlist, "nonexistent_key");
if (count != 0) {
- printf("invalid count value %d after rte_kvargs_count(unexistant_key)\n",
+ printf("invalid count value %d after rte_kvargs_count(nonexistent_key)\n",
count);
rte_kvargs_free(kvlist);
goto fail;
@@ -156,7 +156,7 @@ static int test_valid_kvargs(void)
/* call check_handler() on all entries with key="check", it
* should fail as the value is not recognized by the handler */
if (rte_kvargs_process(kvlist, "check", check_handler, NULL) == 0) {
- printf("rte_kvargs_process() is success bu should not\n");
+ printf("rte_kvargs_process() is success but should not\n");
rte_kvargs_free(kvlist);
goto fail;
}
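These tests exercise the standard parse/count/process/free sequence; a hedged sketch of that flow (keys and values here are arbitrary):

```c
#include <stdio.h>
#include <rte_kvargs.h>

/* Hedged sketch of the sequence under test; keys/values arbitrary. */
static int
print_value(const char *key, const char *value, void *opaque)
{
    (void)opaque;
    printf("%s=%s\n", key, value);
    return 0; /* non-zero makes rte_kvargs_process() return an error */
}

static void
kvargs_example(void)
{
    static const char * const valid[] = { "foo", "check", NULL };
    struct rte_kvargs *kvlist = rte_kvargs_parse("foo=1,check=value1", valid);

    if (kvlist == NULL)
        return;
    /* An absent key counts zero entries and processes none. */
    if (rte_kvargs_count(kvlist, "nonexistent_key") == 0)
        rte_kvargs_process(kvlist, "foo", print_value, NULL);
    rte_kvargs_free(kvlist);
}
```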
@@ -22,7 +22,7 @@ struct ips_tbl_entry {
* in previous test_lpm6_routes.h . Because this table has only 1000
* lines, keeping it doesn't make LPM6 test case so large and also
* make the algorithm to generate rule table unnecessary and the
- * algorithm to genertate test input IPv6 and associated expected
+ * algorithm to generate test input IPv6 and associated expected
* next_hop much simple.
*/
@@ -459,7 +459,7 @@ static int test_member_multimatch(void)
MAX_MATCH, set_ids_cache);
/*
* For cache mode, keys overwrite when signature same.
- * the mutimatch should work like single match.
+ * the multimatch should work like single match.
*/
TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&
ret_cache == 1,
@@ -304,7 +304,7 @@ static int test_mempool_single_consumer(void)
}
/*
- * test function for mempool test based on singple consumer and single producer,
+ * test function for mempool test based on single consumer and single producer,
* can run on one lcore only
*/
static int
@@ -322,7 +322,7 @@ my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
}
/*
- * it tests the mempool operations based on singple producer and single consumer
+ * it tests the mempool operations based on single producer and single consumer
*/
static int
test_mempool_sp_sc(void)
@@ -543,7 +543,7 @@ test_memzone_reserve_max(void)
}
if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size did not return bigest block\n");
+ printf("Memzone reserve with 0 size did not return biggest block\n");
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
rte_dump_physmem_layout(stdout);
@@ -606,7 +606,7 @@ test_memzone_reserve_max_aligned(void)
if (mz->len < minlen || mz->len > maxlen) {
printf("Memzone reserve with 0 size and alignment %u did not return"
- " bigest block\n", align);
+ " biggest block\n", align);
printf("Expected size = %zu-%zu, actual size = %zu\n",
minlen, maxlen, mz->len);
rte_dump_physmem_layout(stdout);
@@ -1054,7 +1054,7 @@ test_memzone_basic(void)
if (mz != memzone1)
return -1;
- printf("test duplcate zone name\n");
+ printf("test duplicate zone name\n");
mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
SOCKET_ID_ANY, 0);
if (mz != NULL)
@@ -121,7 +121,7 @@ test_metrics_update_value(void)
err = rte_metrics_update_value(RTE_METRICS_GLOBAL, KEY, VALUE);
TEST_ASSERT(err >= 0, "%s, %d", __func__, __LINE__);
- /* Successful Test: Valid port_id otherthan RTE_METRICS_GLOBAL, key
+ /* Successful Test: Valid port_id other than RTE_METRICS_GLOBAL, key
* and value
*/
err = rte_metrics_update_value(9, KEY, VALUE);
@@ -109,7 +109,7 @@ test_setup(void)
return -1;
}
- /* Make a pool for cloned packeets */
+ /* Make a pool for cloned packets */
mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS,
0, 0,
rte_pcapng_mbuf_size(pkt_len),
@@ -659,7 +659,7 @@ test_power_cpufreq(void)
/* test of exit power management for an invalid lcore */
ret = rte_power_exit(TEST_POWER_LCORE_INVALID);
if (ret == 0) {
- printf("Unpectedly exit power management successfully for "
+ printf("Unexpectedly exit power management successfully for "
"lcore %u\n", TEST_POWER_LCORE_INVALID);
rte_power_unset_env();
return -1;
@@ -408,7 +408,7 @@ test_rcu_qsbr_synchronize_reader(void *arg)
/*
* rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered
- * the queiscent state.
+ * the quiescent state.
*/
static int
test_rcu_qsbr_synchronize(void)
@@ -443,7 +443,7 @@ test_rcu_qsbr_synchronize(void)
rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
- /* Test if the API returns after unregisterng all the threads */
+ /* Test if the API returns after unregistering all the threads */
for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_unregister(t[0], i);
rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
@@ -1566,10 +1566,10 @@ static void ovfl_check_avg(uint32_t avg)
}
static struct test_config ovfl_test1_config = {
- .ifname = "queue avergage overflow test interface",
+ .ifname = "queue average overflow test interface",
.msg = "overflow test 1 : use one RED configuration,\n"
" increase average queue size to target level,\n"
- " check maximum number of bits requirte_red to represent avg_s\n\n",
+ " check maximum number of bits required to represent avg_s\n\n",
.htxt = "avg queue size "
"wq_log2 "
"fraction bits "
@@ -1757,12 +1757,12 @@ test_invalid_parameters(void)
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
}
- /* min_treshold == max_treshold */
+ /* min_threshold == max_threshold */
if (rte_red_config_init(&config, 0, 1, 1, 0) == 0) {
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
}
- /* min_treshold > max_treshold */
+ /* min_threshold > max_threshold */
if (rte_red_config_init(&config, 0, 2, 1, 0) == 0) {
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
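Both rejected calls violate the documented min_threshold < max_threshold requirement. For contrast, a hedged sketch of a call that passes, with the same argument order as the tests above; the concrete values are arbitrary but inside the documented ranges:

```c
#include <rte_red.h>

/* Hedged sketch: a configuration that satisfies min_th (4) strictly
 * below max_th (8). Argument order follows the tests above:
 * wq_log2, min_th, max_th, maxp_inv. */
static int
red_config_example(struct rte_red_config *config)
{
    return rte_red_config_init(config, 9 /* wq_log2 */, 4 /* min_th */,
                               8 /* max_th */, 10 /* maxp_inv */);
}
```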
@@ -237,7 +237,7 @@
* increases .called counter. Function returns value stored in .ret field
* of the structure.
* In case of some parameters in some functions the expected value is unknown
- * and cannot be detrmined prior to call. Such parameters are stored
+ * and cannot be determined prior to call. Such parameters are stored
* in structure and can be compared or analyzed later in test case code.
*
* Below structures and functions follow the rules just described.
@@ -364,7 +364,7 @@ setup_pipeline(int test_type)
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i^1]},
};
- printf("Setting secont table to output to port\n");
+ printf("Setting second table to output to port\n");
/* Add the default action for the table. */
ret = rte_pipeline_table_default_entry_add(p,
@@ -684,7 +684,7 @@ test_predictable_rss_multirange(void)
/*
* calculate hashes, complements, then adjust keys with
- * complements and recalsulate hashes
+ * complements and recalculate hashes
*/
for (i = 0; i < RTE_DIM(rng_arr); i++) {
for (k = 0; k < 100; k++) {
@@ -1,5 +1,5 @@
#! /usr/bin/env python3
-# SPDX-License-Identitifer: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2020 Intel Corporation
import subprocess
@@ -25,7 +25,7 @@ build_map_changes()
# Triggering this rule, which starts a line and ends it
# with a { identifies a versioned section. The section name is
- # the rest of the line with the + and { symbols remvoed.
+ # the rest of the line with the + and { symbols removed.
# Triggering this rule sets in_sec to 1, which actives the
# symbol rule below
/^.*{/ {
@@ -35,7 +35,7 @@ build_map_changes()
}
}
- # This rule idenfies the end of a section, and disables the
+ # This rule identifies the end of a section, and disables the
# symbol rule
/.*}/ {in_sec=0}
@@ -100,7 +100,7 @@ check_for_rule_violations()
# Just inform the user of this occurrence, but
# don't flag it as an error
echo -n "INFO: symbol $symname is added but "
- echo -n "patch has insuficient context "
+ echo -n "patch has insufficient context "
echo -n "to determine the section name "
echo -n "please ensure the version is "
echo "EXPERIMENTAL"
@@ -465,7 +465,7 @@
v:mID="63"
id="shape63-63"><title
id="title149">Sheet.63</title><desc
- id="desc151">Contanier/App</desc><v:textBlock
+ id="desc151">Container/App</desc><v:textBlock
v:margins="rect(4,4,4,4)" /><v:textRect
height="22.5"
width="90"
@@ -9,7 +9,7 @@ packets. This Linux-specific PMD binds to an AF_PACKET socket and allows
a DPDK application to send and receive raw packets through the Kernel.
In order to improve Rx and Tx performance this implementation makes use of
-PACKET_MMAP, which provides a mmap'ed ring buffer, shared between user space
+PACKET_MMAP, which provides a mmapped ring buffer, shared between user space
and kernel, that's used to send and receive packets. This helps reducing system
calls and the copies needed between user space and Kernel.
@@ -178,7 +178,7 @@ DPDK and must be installed separately:
- mlx4_core: hardware driver managing Mellanox ConnectX-3 devices.
- mlx4_en: Ethernet device driver that provides kernel network interfaces.
- - mlx4_ib: InifiniBand device driver.
+ - mlx4_ib: InfiniBand device driver.
- ib_uverbs: user space driver for verbs (entry point for libibverbs).
- **Firmware update**
@@ -649,7 +649,7 @@ Driver options
A timeout value is set in the driver to control the waiting time before
dropping a packet. Once the timer is expired, the delay drop will be
- deactivated for all the Rx queues with this feature enable. To re-activeate
+ deactivated for all the Rx queues with this feature enabled. To re-activate
it, a rearming is needed and it is part of the kernel driver starting from
OFED 5.5.
@@ -1033,7 +1033,7 @@ Driver options
For the MARK action the last 16 values in the full range are reserved for
internal PMD purposes (to emulate FLAG action). The valid range for the
- MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF
+ MARK action values is 0-0xFFEF for the 16-bit mode and 0-0xFFFFEF
for the 24-bit mode, the flows with the MARK action value outside
the specified range will be rejected.
@@ -1317,7 +1317,7 @@ DPDK and must be installed separately:
- mlx5_core: hardware driver managing Mellanox
ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel
network devices.
- - mlx5_ib: InifiniBand device driver.
+ - mlx5_ib: InfiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
@@ -751,7 +751,7 @@ feature is useful when the user wants to abandon partially enqueued operations
for a failed enqueue burst operation and try enqueuing in a whole later.
Similar as enqueue, there are two dequeue functions:
-``rte_cryptodev_raw_dequeue`` for dequeing single operation, and
+``rte_cryptodev_raw_dequeue`` for dequeuing a single operation, and
``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.
all operations in a ``struct rte_crypto_sym_vec`` descriptor). The
``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback
@@ -433,7 +433,7 @@ and decides on a preferred IOVA mode.
- if all buses report RTE_IOVA_PA, then the preferred IOVA mode is RTE_IOVA_PA,
- if all buses report RTE_IOVA_VA, then the preferred IOVA mode is RTE_IOVA_VA,
-- if all buses report RTE_IOVA_DC, no bus expressed a preferrence, then the
+- if all buses report RTE_IOVA_DC, no bus expressed a preference, then the
preferred mode is RTE_IOVA_DC,
- if the buses disagree (at least one wants RTE_IOVA_PA and at least one wants
RTE_IOVA_VA), then the preferred IOVA mode is RTE_IOVA_DC (see below with the
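A hedged sketch of the merge rule this list describes; the helper name and the two booleans summarizing the bus reports are hypothetical:

```c
#include <stdbool.h>
#include <rte_eal.h>

/* Hedged sketch (helper and booleans hypothetical) of the merge rule
 * above: unanimity on PA or VA wins; no preference, or disagreement,
 * collapses to RTE_IOVA_DC. */
static enum rte_iova_mode
merge_bus_iova_pref(bool any_pa, bool any_va)
{
    if (any_pa && !any_va)
        return RTE_IOVA_PA; /* all buses that care want PA */
    if (any_va && !any_pa)
        return RTE_IOVA_VA; /* all buses that care want VA */
    return RTE_IOVA_DC;
}
```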
@@ -658,7 +658,7 @@ Known Issues
+ rte_ring
rte_ring supports multi-producer enqueue and multi-consumer dequeue.
- However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptable.
+ However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptible.
.. note::
@@ -460,7 +460,7 @@
height="14.642858"
x="39.285713"
y="287.16254" /></flowRegion><flowPara
- id="flowPara4817">offse</flowPara></flowRoot> <text
+ id="flowPara4817">offset</flowPara></flowRoot> <text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
x="74.16684"
@@ -649,7 +649,7 @@
height="14.642858"
x="39.285713"
y="287.16254" /></flowRegion><flowPara
- id="flowPara4817">offse</flowPara></flowRoot> <text
+ id="flowPara4817">offset</flowPara></flowRoot> <text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
x="16.351753"
@@ -1196,12 +1196,12 @@ In the case of severe congestion, the dropper resorts to tail drop.
This occurs when a packet queue has reached maximum capacity and cannot store any more packets.
In this situation, all arriving packets are dropped.
-The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.
+The flow through the dropper is illustrated in :numref:`figure_flow_tru_dropper`.
The RED/WRED/PIE algorithm is exercised first and tail drop second.
-.. _figure_flow_tru_droppper:
+.. _figure_flow_tru_dropper:
-.. figure:: img/flow_tru_droppper.*
+.. figure:: img/flow_tru_dropper.*
Flow Through the Dropper
@@ -1379,7 +1379,7 @@ Matches a network service header (RFC 8300).
- ``ttl``: maximum SFF hopes (6 bits).
- ``length``: total length in 4 bytes words (6 bits).
- ``reserved1``: reserved1 bits (4 bits).
-- ``mdtype``: ndicates format of NSH header (4 bits).
+- ``mdtype``: indicates format of NSH header (4 bits).
- ``next_proto``: indicates protocol type of encap data (8 bits).
- ``spi``: service path identifier (3 bytes).
- ``sindex``: service index (1 byte).
@@ -37,7 +37,7 @@ using ``rte_rawdev_queue_conf_get()``.
To perform data transfer use standard ``rte_rawdev_enqueue_buffers()`` and
``rte_rawdev_dequeue_buffers()`` APIs. Not all messages produce sensible
-responses hence dequeueing is not always necessary.
+responses, hence dequeuing is not always necessary.
BPHY CGX/RPM PMD
----------------
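A hedged sketch of the transfer pair named above; the queue context argument is device-specific and treated as opaque here:

```c
#include <rte_rawdev.h>

/* Hedged sketch of the calls named above; the queue context is
 * device-specific and treated as opaque here. */
static int
rawdev_xfer_example(uint16_t dev_id, struct rte_rawdev_buf **bufs,
                    unsigned int count, rte_rawdev_obj_t queue_ctx)
{
    int ret = rte_rawdev_enqueue_buffers(dev_id, bufs, count, queue_ctx);

    if (ret < 0)
        return ret;
    /* Not every message yields a response, so dequeuing fewer buffers
     * than were enqueued is not necessarily an error. */
    return rte_rawdev_dequeue_buffers(dev_id, bufs, count, queue_ctx);
}
```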
@@ -22,7 +22,7 @@ PCRE back tracking ctrl
Support PCRE back tracking ctrl.
PCRE call outs
- Support PCRE call outes.
+ Support PCRE call outs.
PCRE forward reference
Support Forward reference.
@@ -192,7 +192,7 @@ EAL
* **igb_uio: Fixed possible mmap failure for Linux >= 4.5.**
- The mmaping of the iomem range of the PCI device fails for kernels that
+ The mmapping of the iomem range of the PCI device fails for kernels that
enabled the ``CONFIG_IO_STRICT_DEVMEM`` option. The error seen by the
user is as similar to the following::
@@ -232,7 +232,7 @@ API Changes
* The ``rte_cryptodev_configure()`` function does not create the session
mempool for the device anymore.
* The ``rte_cryptodev_queue_pair_attach_sym_session()`` and
- ``rte_cryptodev_queue_pair_dettach_sym_session()`` functions require
+ ``rte_cryptodev_queue_pair_detach_sym_session()`` functions require
the new parameter ``device id``.
* Parameters of ``rte_cryptodev_sym_session_create()`` were modified to
accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.
@@ -671,7 +671,7 @@ Resolved Issues
value 0.
- Fixes: 40b966a211ab ("ivshmem: library changes for mmaping using ivshmem")
+ Fixes: 40b966a211ab ("ivshmem: library changes for mmapping using ivshmem")
* **ixgbe/base: Fix SFP probing.**
@@ -154,7 +154,7 @@ each RX queue uses its own mempool.
.. literalinclude:: ../../../examples/ip_reassembly/main.c
:language: c
- :start-after: mbufs stored int the gragment table. 8<
+ :start-after: mbufs stored int the fragment table. 8<
:end-before: >8 End of mbufs stored int the fragmentation table.
:dedent: 1
@@ -176,7 +176,7 @@ function. The value returned is the number of parsed arguments:
.. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
:language: c
:start-after: Initialize the Environment Abstraction Layer (EAL). 8<
- :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
:dedent: 1
The next task is to initialize the PQoS library and configure CAT. The
@@ -191,7 +191,7 @@ flow is not handled by the node.
.. literalinclude:: ../../../examples/server_node_efd/node/node.c
:language: c
:start-after: Packets dequeued from the shared ring. 8<
- :end-before: >8 End of packets dequeueing.
+ :end-before: >8 End of packets dequeuing.
Finally, note that both processes updates statistics, such as transmitted, received
and dropped packets, which are shown and refreshed by the server app.
@@ -54,7 +54,7 @@ function. The value returned is the number of parsed arguments:
.. literalinclude:: ../../../examples/skeleton/basicfwd.c
:language: c
:start-after: Initializion the Environment Abstraction Layer (EAL). 8<
- :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
:dedent: 1
@@ -681,7 +681,7 @@ The following is an example JSON string for a power management request.
"resource_id": 10
}}
-To query the available frequences of an lcore, use the query_cpu_freq command.
+To query the available frequencies of an lcore, use the query_cpu_freq command.
Where {core_num} is the lcore to query.
Before using this command, please enable responses via the set_query command on the host.
@@ -3510,7 +3510,7 @@ Tunnel offload
Indicate tunnel offload rule type
- ``tunnel_set {tunnel_id}``: mark rule as tunnel offload decap_set type.
-- ``tunnel_match {tunnel_id}``: mark rule as tunel offload match type.
+- ``tunnel_match {tunnel_id}``: mark rule as tunnel offload match type.
Matching pattern
^^^^^^^^^^^^^^^^
@@ -2097,7 +2097,7 @@ dequeue_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
rte_bbdev_log_debug("DMA response desc %p", desc);
*op = desc->enc_req.op_addr;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
@@ -2139,7 +2139,7 @@ dequeue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
for (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {
desc = q->ring_addr + ((q->head_free_desc + desc_offset +
cb_idx) & q->sw_ring_wrap_mask);
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
status |= desc_error << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log_debug("DMA response desc %p", desc);
@@ -2177,7 +2177,7 @@ dequeue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;
/* crc_pass = 0 when decoder fails */
(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
return 1;
@@ -2221,7 +2221,7 @@ dequeue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
iter_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);
/* crc_pass = 0 when decoder fails, one fails all */
status |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
status |= desc_error << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log_debug("DMA response desc %p", desc);
@@ -31,7 +31,7 @@ struct bbdev_null_params {
uint16_t queues_num; /*< Null BBDEV queues number */
};
-/* Accecptable params for null BBDEV devices */
+/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"
@@ -61,7 +61,7 @@ struct turbo_sw_params {
uint16_t queues_num; /*< Turbo SW device queues number */
};
-/* Accecptable params for Turbo SW devices */
+/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"
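Since these params are vdev kvargs, a Turbo SW device would typically be instantiated as below; the device name "baseband_turbo_sw" and the header are assumptions, not taken from this patch:

```c
#include <rte_bus_vdev.h>

/* Hedged sketch: the two params above are vdev kvargs. The device
 * name "baseband_turbo_sw" is assumed here. */
static int
turbo_sw_vdev_example(void)
{
    return rte_vdev_init("baseband_turbo_sw",
                         TURBO_SW_MAX_NB_QUEUES_ARG "=8,"
                         TURBO_SW_SOCKET_ID_ARG "=0");
}
```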
@@ -70,7 +70,7 @@ compare_dpaa_devices(struct rte_dpaa_device *dev1,
{
int comp = 0;
- /* Segragating ETH from SEC devices */
+ /* Segregating ETH from SEC devices */
if (dev1->device_type > dev2->device_type)
comp = 1;
else if (dev1->device_type < dev2->device_type)
@@ -1353,7 +1353,7 @@ __rte_internal
int qman_irqsource_add(u32 bits);
/**
- * qman_fq_portal_irqsource_add - samilar to qman_irqsource_add, but it
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
* takes portal (fq specific) as input rather than using the thread affined
* portal.
*/
@@ -1416,7 +1416,7 @@ __rte_internal
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
/**
- * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
* @fq: Frame Queue on which the volatile dequeue command is issued
* @dq: DQRR entry to consume. This is the one which is provided by the
* 'qbman_dequeue' command.
@@ -2017,7 +2017,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
* @cgr: the 'cgr' object to deregister
*
* "Unplugs" this CGR object from the portal affine to the cpu on which this API
- * is executed. This must be excuted on the same affine portal on which it was
+ * is executed. This must be executed on the same affine portal on which it was
* created.
*/
__rte_internal
@@ -40,7 +40,7 @@ struct dpaa_raw_portal {
/* Specifies the stash request queue this portal should use */
uint8_t sdest;
- /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
* for don't care. The portal index will be populated by the
* driver when the ioctl() successfully completes.
*/
@@ -49,7 +49,7 @@ struct dpaa_portal_map {
struct dpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum dpaa_portal_type type;
- /* Specifes a specific portal index to map or 0xffffffff
+ /* Specifies a specific portal index to map or 0xffffffff
* for don't care.
*/
uint32_t index;
@@ -539,7 +539,7 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
fslmc_bus = driver->fslmc_bus;
- /* Cleanup the PA->VA Translation table; From whereever this function
+ /* Cleanup the PA->VA Translation table; From wherever this function
* is called from.
*/
if (rte_eal_iova_mode() == RTE_IOVA_PA)
@@ -178,7 +178,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
dpio_epoll_fd = epoll_create(1);
ret = rte_dpaa2_intr_enable(dpio_dev->intr_handle, 0);
if (ret) {
- DPAA2_BUS_ERR("Interrupt registeration failed");
+ DPAA2_BUS_ERR("Interrupt registration failed");
return -1;
}
@@ -156,7 +156,7 @@ struct dpaa2_queue {
struct rte_cryptodev_data *crypto_data;
};
uint32_t fqid; /*!< Unique ID of this queue */
- uint16_t flow_id; /*!< To be used by DPAA2 frmework */
+ uint16_t flow_id; /*!< To be used by DPAA2 framework */
uint8_t tc_index; /*!< traffic class identifier */
uint8_t cgid; /*! < Congestion Group id for this queue */
uint64_t rx_pkts;
@@ -510,7 +510,7 @@ int qbman_result_has_new_result(struct qbman_swp *s,
struct qbman_result *dq);
/**
- * qbman_check_command_complete() - Check if the previous issued dq commnd
+ * qbman_check_command_complete() - Check if the previously issued dq command
* is completed and results are available in memory.
* @s: the software portal object.
* @dq: the dequeue result read from the memory.
@@ -687,7 +687,7 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
/**
* qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
- * odpid is valid only if ODPVAILD flag is TRUE.
+ * odpid is valid only if ODPVALID flag is TRUE.
* @dq: the dequeue result.
*
* Return odpid.
@@ -743,7 +743,7 @@ const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
* qbman_result_SCN_state() - Get the state field in State-change notification
* @scn: the state change notification.
*
- * Return the state in the notifiation.
+ * Return the state in the notification.
*/
__rte_internal
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
@@ -825,7 +825,7 @@ uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
/* Parsing CGCU */
/**
- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
* @scn: the state change notification.
*
* Return the CGCU resource id.
@@ -903,14 +903,14 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d);
__rte_internal
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
/**
- * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
* @d: the enqueue descriptor.
* @response_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
- * @incomplete: indiates whether this is the last fragments using the same
- * sequeue number.
+ * @incomplete: indicates whether this is the last fragment using the same
+ * sequence number.
*/
__rte_internal
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
@@ -1052,10 +1052,10 @@ __rte_internal
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
/**
- * qbman_result_eqresp_rc() - determines if enqueue command is sucessful.
+ * qbman_result_eqresp_rc() - determines if enqueue command is successful.
* @eqresp: enqueue response.
*
- * Return 0 when command is sucessful.
+ * Return 0 when command is successful.
*/
__rte_internal
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
@@ -1250,7 +1250,7 @@ int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
/**
* These functions change the FQ flow-control stuff between XON/XOFF. (The
* default is XON.) This setting doesn't affect enqueues to the FQ, just
- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
* non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
* changed to XOFF after it had already become truly-scheduled to a channel, and
* a pull dequeue of that channel occurs that selects that FQ for dequeuing,
@@ -815,7 +815,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
continue;
}
- /* skip non-mmapable BARs */
+ /* skip non-mmappable BARs */
if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
free(reg);
continue;
@@ -197,7 +197,7 @@ rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
int rte_vdev_init(const char *name, const char *args);
/**
- * Uninitalize a driver specified by name.
+ * Uninitialize a driver specified by name.
*
* @param name
* The pointer to a driver name to be uninitialized.
@@ -134,7 +134,7 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
/*
* If device class GUID matches, call the probe function of
- * registere drivers for the vmbus device.
+ * registered drivers for the vmbus device.
* Return -1 if initialization failed,
* and 1 if no driver found for this device.
*/
@@ -14,7 +14,7 @@
#define CGX_CMRX_INT_OVERFLW BIT_ULL(1)
/*
* CN10K stores number of lmacs in 4 bit filed
- * in contraty to CN9K which uses only 3 bits.
+ * in contrast to CN9K which uses only 3 bits.
*
* In theory masks should differ yet on CN9K
* bits beyond specified range contain zeros.
@@ -138,7 +138,7 @@ nix_lf_bpf_dump(__io struct nix_band_prof_s *bpf)
{
plt_dump("W0: cir_mantissa \t\t\t%d\nW0: pebs_mantissa \t\t\t0x%03x",
bpf->cir_mantissa, bpf->pebs_mantissa);
- plt_dump("W0: peir_matissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
+ plt_dump("W0: peir_mantissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
bpf->peir_mantissa, bpf->cbs_exponent);
plt_dump("W0: cir_exponent \t\t\t%d\nW0: pebs_exponent \t\t\t%d",
bpf->cir_exponent, bpf->pebs_exponent);
@@ -107,7 +107,7 @@ nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
if (profile->peak.rate && min_rate > profile->peak.rate)
min_rate = profile->peak.rate;
- /* Each packet accomulate single count, whereas HW
+ /* Each packet accumulates a single count, whereas HW
* considers each unit as Byte, so we need convert
* user pps to bps
*/
@@ -234,7 +234,7 @@ npc_get_kex_capability(struct npc *npc)
/* Ethtype: Offset 12B, len 2B */
kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
- /* QINQ VLAN Ethtype: ofset 8B, len 2B */
+ /* QINQ VLAN Ethtype: offset 8B, len 2B */
kex_cap.bit.ethtype_x = npc_is_kex_enabled(
npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
@@ -363,7 +363,7 @@ struct npc {
uint32_t rss_grps; /* rss groups supported */
uint16_t flow_prealloc_size; /* Pre allocated mcam size */
uint16_t flow_max_priority; /* Max priority for flow */
- uint16_t switch_header_type; /* Suppprted switch header type */
+ uint16_t switch_header_type; /* Supported switch header type */
uint32_t mark_actions; /* Number of mark actions */
uint32_t vtag_strip_actions; /* vtag insert/strip actions */
uint16_t pf_func; /* pf_func of device */
@@ -246,7 +246,7 @@ cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
if (cpt_ctx->fc_type == FC_GEN) {
/*
* We need to always say IV is from DPTR as user can
- * sometimes iverride IV per operation.
+ * sometimes override IV per operation.
*/
fctx->enc.iv_source = CPT_FROM_DPTR;
@@ -3035,7 +3035,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
tailroom = rte_pktmbuf_tailroom(pkt);
if (likely((headroom >= 24) &&
(tailroom >= 8))) {
- /* In 83XX this is prerequivisit for Direct mode */
+ /* In 83XX this is a prerequisite for Direct mode */
*flags |= SINGLE_BUF_HEADTAILROOM;
}
param->bufs[0].vaddr = seg_data;
@@ -779,7 +779,7 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
* Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
* ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
* prime len, order len)).
- * Please note sign, public key and order can not excede prime length
+ * Please note sign, public key and order cannot exceed prime length
* i.e. 6 * p_align
*/
dlen = sizeof(fpm_table_iova) + m_align + (8 * p_align);
@@ -67,7 +67,7 @@ cnstr_shdsc_zuce(uint32_t *descbuf, bool ps, bool swap,
* @authlen: size of digest
*
* The IV prepended before hmac payload must be 8 bytes consisting
- * of COUNT||BEAERER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
+ * of COUNT||BEARER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
* direction is of 1 bit - totalling to 38 bits.
*
* Return: size of descriptor written in words or negative number on error
@@ -492,10 +492,10 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
/* Set the variable size of data the register will write */
if (dir == OP_TYPE_ENCAP_PROTOCOL) {
- /* We will add the interity data so add its length */
+ /* We will add the integrity data so add its length */
MATHI(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
} else {
- /* We will check the interity data so remove its length */
+ /* We will check the integrity data so remove its length */
MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
/* Do not take the ICV in the out-snooping configuration */
MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);
@@ -803,7 +803,7 @@ static inline int pdcp_sdap_insert_no_snoop_op(
CLRW_CLR_C1MODE,
CLRW, 0, 4, IMMED);
- /* Load the key for authentcation */
+ /* Load the key for authentication */
KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
authdata->keylen, INLINE_KEY(authdata));
@@ -261,7 +261,7 @@ dpaax_iova_table_depopulate(void)
rte_free(dpaax_iova_table_p->entries);
dpaax_iova_table_p = NULL;
- DPAAX_DEBUG("IOVA Table cleanedup");
+ DPAAX_DEBUG("IOVA Table cleaned");
}
int
@@ -1006,7 +1006,7 @@ struct iavf_profile_tlv_section_record {
u8 data[12];
};
-/* Generic AQ section in proflie */
+/* Generic AQ section in profile */
struct iavf_profile_aq_section {
u16 opcode;
u16 flags;
@@ -233,7 +233,7 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
case VIRTCHNL_OP_DCF_CMD_DESC:
return "VIRTCHNL_OP_DCF_CMD_DESC";
case VIRTCHNL_OP_DCF_CMD_BUFF:
- return "VIRTCHHNL_OP_DCF_CMD_BUFF";
+ return "VIRTCHNL_OP_DCF_CMD_BUFF";
case VIRTCHNL_OP_DCF_DISABLE:
return "VIRTCHNL_OP_DCF_DISABLE";
case VIRTCHNL_OP_DCF_GET_VSI_MAP:
@@ -854,7 +854,7 @@ static void mlx5_common_driver_init(void)
static bool mlx5_common_initialized;
/**
- * One time innitialization routine for run-time dependency on glue library
+ * One time initialization routine for run-time dependency on glue library
* for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,
* must invoke in its constructor.
*/
@@ -1541,7 +1541,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
* Destroy a mempool registration object.
*
* @param standalone
- * Whether @p mpr owns its MRs excludively, i.e. they are not shared.
+ * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
*/
static void
mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
@@ -1822,7 +1822,7 @@ mlx5_devx_cmd_create_td(void *ctx)
* Pointer to file stream.
*
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
@@ -58,7 +58,7 @@ static struct mlx5_sys_mem mlx5_sys_mem = {
* Check if the address belongs to memory seg list.
*
* @param addr
- * Memory address to be ckeced.
+ * Memory address to be checked.
* @param msl
* Memory seg list.
*
@@ -109,7 +109,7 @@ mlx5_mem_update_msl(void *addr)
* Check if the address belongs to rte memory.
*
* @param addr
- * Memory address to be ckeced.
+ * Memory address to be checked.
*
* @return
* True if it belongs, false otherwise.
@@ -19,7 +19,7 @@ extern "C" {
enum mlx5_mem_flags {
MLX5_MEM_ANY = 0,
- /* Memory will be allocated dpends on sys_mem_en. */
+ /* Memory will be allocated depending on sys_mem_en. */
MLX5_MEM_SYS = 1 << 0,
/* Memory should be allocated from system. */
MLX5_MEM_RTE = 1 << 1,
@@ -4172,7 +4172,7 @@ mlx5_flow_mark_get(uint32_t val)
* timestamp format supported by the queue.
*
* @return
- * Converted timstamp format settings.
+ * Converted timestamp format settings.
*/
static inline uint32_t
mlx5_ts_format_conv(uint32_t ts_format)
@@ -302,7 +302,7 @@ mlx5_os_umem_dereg(void *pumem)
}
/**
- * Register mr. Given protection doamin pointer, pointer to addr and length
+ * Register mr. Given protection domain pointer, pointer to addr and length
* register the memory region.
*
* @param[in] pd
@@ -310,7 +310,7 @@ mlx5_os_umem_dereg(void *pumem)
* @param[in] addr
* Pointer to memory start address (type devx_device_ctx).
* @param[in] length
- * Lengtoh of the memory to register.
+ * Length of the memory to register.
* @param[out] pmd_mr
* pmd_mr struct set with lkey, address, length, pointer to mr object, mkey
*
@@ -21,7 +21,7 @@
/**
* This API allocates aligned or non-aligned memory. The free can be on either
* aligned or nonaligned memory. To be protected - even though there may be no
- * alignment - in Windows this API will unconditioanlly call _aligned_malloc()
+ * alignment - in Windows this API will unconditionally call _aligned_malloc()
* with at least a minimal alignment size.
*
* @param[in] align
@@ -72,7 +72,7 @@
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
@@ -616,7 +616,7 @@ typedef struct efsys_bar_s {
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
-/* Just avoid store and compiler (impliciltly) reordering */
+/* Just avoid store and compiler (implicit) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
/* TIMESTAMP */
@@ -195,7 +195,7 @@ union zip_inst_s {
uint64_t bf : 1;
/** Comp/decomp operation */
uint64_t op : 2;
- /** Data sactter */
+ /** Data scatter */
uint64_t ds : 1;
/** Data gather */
uint64_t dg : 1;
@@ -376,7 +376,7 @@ union zip_inst_s {
uint64_t bf : 1;
/** Comp/decomp operation */
uint64_t op : 2;
- /** Data sactter */
+ /** Data scatter */
uint64_t ds : 1;
/** Data gather */
uint64_t dg : 1;
@@ -31,7 +31,7 @@ extern int octtx_zip_logtype_driver;
/**< PCI device id of ZIP VF */
#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
-/* maxmum number of zip vf devices */
+/* maximum number of zip vf devices */
#define ZIP_MAX_VFS 8
/* max size of one chunk */
@@ -463,7 +463,7 @@ qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
} else if (info.error) {
rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
QAT_LOG(ERR,
- "Destoying mempool %s as at least one element failed initialisation",
+ "Destroying mempool %s as at least one element failed initialisation",
stream_pool_name);
rte_mempool_free(mp);
mp = NULL;
@@ -32,7 +32,7 @@ enum bcmfs_device_type {
BCMFS_UNKNOWN
};
-/* A table to store registered queue pair opertations */
+/* A table to store registered queue pair operations */
struct bcmfs_hw_queue_pair_ops_table {
rte_spinlock_t tl;
/* Number of used ops structs in the table. */
@@ -212,7 +212,7 @@ bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
nb_descriptors = FS_RM_MAX_REQS;
if (qp_conf->iobase == NULL) {
- BCMFS_LOG(ERR, "IO onfig space null");
+ BCMFS_LOG(ERR, "IO config space null");
return -EINVAL;
}
@@ -20,11 +20,11 @@ struct bcmfs_sym_request;
/** Crypto Request processing successful. */
#define BCMFS_SYM_RESPONSE_SUCCESS (0)
-/** Crypot Request processing protocol failure. */
+/** Crypto Request processing protocol failure. */
#define BCMFS_SYM_RESPONSE_PROTO_FAILURE (1)
-/** Crypot Request processing completion failure. */
+/** Crypto Request processing completion failure. */
#define BCMFS_SYM_RESPONSE_COMPL_ERROR (2)
-/** Crypot Request processing hash tag check error. */
+/** Crypto Request processing hash tag check error. */
#define BCMFS_SYM_RESPONSE_HASH_TAG_ERROR (3)
/** Maximum threshold length to adjust AAD in continuation
@@ -12,7 +12,7 @@
#include "bcmfs_sym_defs.h"
#include "bcmfs_sym_req.h"
-/* structure to hold element's arrtibutes */
+/* structure to hold element's attributes */
struct fsattr {
void *va;
uint64_t pa;
@@ -441,7 +441,7 @@ static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
{
struct bcmfs_queue *txq = &qp->tx_q;
- /* sync in bfeore ringing the door-bell */
+ /* sync before ringing the door-bell */
rte_wmb();
FS_MMIO_WRITE32(txq->descs_inflight,
@@ -376,7 +376,7 @@ struct sec_job_ring_t {
void *register_base_addr; /* Base address for SEC's
* register memory for this job ring.
*/
- uint8_t coalescing_en; /* notifies if coelescing is
+ uint8_t coalescing_en; /* notifies if coalescing is
* enabled for the job ring
*/
sec_job_ring_state_t jr_state; /* The state of this job ring */
@@ -479,7 +479,7 @@ void hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code);
/* @brief Set interrupt coalescing parameters on the Job Ring.
* @param [in] job_ring The job ring
- * @param [in] irq_coalesing_timer Interrupt coalescing timer threshold.
+ * @param [in] irq_coalescing_timer Interrupt coalescing timer threshold.
* This value determines the maximum
* amount of time after processing a
* descriptor before raising an interrupt.
@@ -169,7 +169,7 @@ struct sec4_sg_entry {
/* Structure encompassing a job descriptor which is to be processed
* by SEC. User should also initialise this structure with the callback
- * function pointer which will be called by driver after recieving proccessed
+ * function pointer which will be called by driver after receiving processed
* descriptor from SEC. User data is also passed in this data structure which
* will be sent as an argument to the user callback function.
*/
@@ -288,7 +288,7 @@ int caam_jr_enable_irqs(int uio_fd);
* value that indicates an IRQ disable action into UIO file descriptor
* of this job ring.
*
- * @param [in] uio_fd UIO File descripto
+ * @param [in] uio_fd UIO File descriptor
* @retval 0 for success
* @retval -1 value for error
*
@@ -227,7 +227,7 @@ caam_jr_enable_irqs(int uio_fd)
* value that indicates an IRQ disable action into UIO file descriptor
* of this job ring.
*
- * @param [in] uio_fd UIO File descripto
+ * @param [in] uio_fd UIO File descriptor
* @retval 0 for success
* @retval -1 value for error
*
@@ -1299,7 +1299,7 @@ ccp_auth_slot(struct ccp_session *session)
case CCP_AUTH_ALGO_SHA512_HMAC:
/**
* 1. Load PHash1 = H(k ^ ipad); to LSB
- * 2. generate IHash = H(hash on meassage with PHash1
+ * 2. generate IHash = H(hash on message with PHash1
* as init values);
* 3. Retrieve IHash 2 slots for 384/512
* 4. Load Phash2 = H(k ^ opad); to LSB
@@ -70,7 +70,7 @@
/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64
-/* SHA LSB intialiazation values */
+/* SHA LSB initialization values */
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -19,7 +19,7 @@
#include <rte_crypto_sym.h>
#include <cryptodev_pmd.h>
-/**< CCP sspecific */
+/**< CCP specific */
#define MAX_HW_QUEUES 5
#define CCP_MAX_TRNG_RETRIES 10
#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
@@ -723,7 +723,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
}
ops[pkts++] = op;
- /* report op status to sym->op and then free the ctx memeory */
+ /* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
qman_dqrr_consume(fq, dq);
@@ -296,7 +296,7 @@ cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
/* CPT VF device initialization */
otx_cpt_vfvq_init(cptvf);
- /* Send msg to PF to assign currnet Q to required group */
+ /* Send msg to PF to assign current Q to required group */
cptvf->vfgrp = group;
err = otx_cpt_send_vf_grp_msg(cptvf, group);
if (err) {
@@ -70,7 +70,7 @@ void
otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);
/*
- * Checks if VF is able to comminicate with PF
+ * Checks if VF is able to communicate with PF
* and also gets the CPT number this VF is associated to.
*/
int
@@ -558,7 +558,7 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
&mdata, (void **)&prep_req);
if (unlikely(ret)) {
- CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+ CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
return NULL;
}
@@ -109,7 +109,7 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
static int qat_asym_check_nonzero(rte_crypto_param n)
{
if (n.length < 8) {
- /* Not a case for any cryptograpic function except for DH
+ /* Not a case for any cryptographic function except for DH
* generator which very often can be of one byte length
*/
size_t i;
@@ -419,7 +419,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
/* In case of AES-CCM this may point to user selected
- * memory or iv offset in cypto_op
+ * memory or iv offset in crypto_op
*/
uint8_t *aad_data = op->sym->aead.aad.data;
/* This is true AAD length, it not includes 18 bytes of
@@ -145,7 +145,7 @@ virtqueue_notify(struct virtqueue *vq)
{
/*
* Ensure updated avail->idx is visible to host.
- * For virtio on IA, the notificaiton is through io port operation
+ * For virtio on IA, the notification is through io port operation
* which is a serialization instruction itself.
*/
VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
@@ -169,7 +169,7 @@ vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
struct rte_ring *completed;
uint16_t i;
- desc = rte_zmalloc_socket("dma_skelteon_desc",
+ desc = rte_zmalloc_socket("dma_skeleton_desc",
nb_desc * sizeof(struct skeldma_desc),
RTE_CACHE_LINE_SIZE, hw->socket_id);
if (desc == NULL) {
@@ -140,7 +140,7 @@ _eventdev_setup(int mode)
struct rte_event_dev_info info;
int i, ret;
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(
pool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());
if (!eventdev_test_mempool) {
@@ -1543,7 +1543,7 @@ cnxk_sso_selftest(const char *dev_name)
cn9k_sso_set_rsrc(dev);
if (cnxk_sso_testsuite_run(dev_name))
return rc;
- /* Verift dual ws mode. */
+ /* Verify dual ws mode. */
printf("Verifying CN9K Dual workslot mode\n");
dev->dual_ws = 1;
cn9k_sso_set_rsrc(dev);
@@ -2145,7 +2145,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
}
/* This is expected with eventdev API!
- * It blindly attemmpts to unmap all queues.
+ * It blindly attempts to unmap all queues.
*/
if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
@@ -519,7 +519,7 @@ struct dlb2_eventdev_port {
bool setup_done;
/* enq_configured is set when the qm port is created */
bool enq_configured;
- uint8_t implicit_release; /* release events before dequeueing */
+ uint8_t implicit_release; /* release events before dequeuing */
} __rte_cache_aligned;
struct dlb2_queue {
@@ -223,7 +223,7 @@ test_stop_flush(struct test *t) /* test to check we can properly flush events */
0,
RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
&dequeue_depth)) {
- printf("%d: Error retrieveing dequeue depth\n", __LINE__);
+ printf("%d: Error retrieving dequeue depth\n", __LINE__);
goto err;
}
@@ -24,7 +24,7 @@ extern "C" {
* Selects the token pop mode for a DLB2 port.
*/
enum dlb2_token_pop_mode {
- /* Pop the CQ tokens immediately after dequeueing. */
+ /* Pop the CQ tokens immediately after dequeuing. */
AUTO_POP,
/* Pop CQ tokens after (dequeue_depth - 1) events are released.
* Supported on load-balanced ports only.
@@ -118,7 +118,7 @@ _eventdev_setup(int mode)
struct rte_event_dev_info info;
const char *pool_name = "evdev_dpaa2_test_pool";
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
MAX_EVENTS,
0 /*MBUF_CACHE_SIZE*/,
@@ -24,7 +24,7 @@
/* Multiple 24-bit flow ids will map to the same DSW-level flow. The
* number of DSW flows should be high enough make it unlikely that
* flow ids of several large flows hash to the same DSW-level flow.
- * Such collisions will limit parallism and thus the number of cores
+ * Such collisions will limit parallelism and thus the number of cores
* that may be utilized. However, configuring a large number of DSW
* flows might potentially, depending on traffic and actual
* application flow id value range, result in each such DSW-level flow
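/* A sketch of that mapping (the hash below is an illustrative assumption,
 * not the driver's): the 24-bit flow id is folded into one of
 * DSW_MAX_FLOWS buckets, so distinct ids may share a bucket.
 */
static inline uint16_t
dsw_flow_bucket_sketch(uint32_t flow_id)
{
	/* DSW_MAX_FLOWS is assumed to be a power of two. */
	return (flow_id * 2654435761u >> 8) & (DSW_MAX_FLOWS - 1);
}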
@@ -104,7 +104,7 @@
/* Only one outstanding migration per port is allowed */
#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)
-/* Enough room for paus request/confirm and unpaus request/confirm for
+/* Enough room for pause request/confirm and unpause request/confirm for
* all possible senders.
*/
#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
@@ -1096,7 +1096,7 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port)
static void
dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
{
- /* To pull the control ring reasonbly often on busy ports,
+ /* To pull the control ring reasonably often on busy ports,
* each dequeued/enqueued event is considered an 'op' too.
*/
port->ops_since_bg_task += (num_events+1);
@@ -1180,7 +1180,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
* addition, a port cannot be left "unattended" (e.g. unused)
* for long periods of time, since that would stall
* migration. Eventdev API extensions to provide a cleaner way
- * to archieve both of these functions should be
+ * to achieve both of these functions should be
* considered.
*/
if (unlikely(events_len == 0)) {
@@ -88,7 +88,7 @@
/*
* In Cavium OCTEON TX SoC, all accesses to the device registers are
- * implictly strongly ordered. So, The relaxed version of IO operation is
+ * implicitly strongly ordered. So, the relaxed version of IO operation is
* safe to use with out any IO memory barriers.
*/
#define ssovf_read64 rte_read64_relaxed
@@ -151,7 +151,7 @@ _eventdev_setup(int mode)
struct rte_event_dev_info info;
const char *pool_name = "evdev_octeontx_test_pool";
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
MAX_EVENTS,
0 /*MBUF_CACHE_SIZE*/,
@@ -139,7 +139,7 @@ _eventdev_setup(int mode)
struct rte_event_dev_info info;
int i, ret;
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
0, 0, 512,
rte_socket_id());
@@ -74,7 +74,7 @@ otx2_ssogws_dual_get_work(struct otx2_ssogws_state *ws,
event.flow_id, flags, lookup_mem);
/* Extracting tstamp, if PTP enabled. CGX will prepend
* the timestamp at starting of packet data and it can
- * be derieved from WQE 9 dword which corresponds to SG
+ * be derived from WQE 9 dword which corresponds to SG
* iova.
* rte_pktmbuf_mtod_offset can be used for this purpose
* but it brings down the performance as it reads
@@ -703,7 +703,7 @@ opdl_probe(struct rte_vdev_device *vdev)
}
PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
- "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
+ "Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]"
" , self_test:[%s]\n",
dev->data->dev_id,
name,
@@ -864,7 +864,7 @@ qid_basic(struct test *t)
}
- /* Start the devicea */
+ /* Start the device */
if (!err) {
if (rte_event_dev_start(evdev) < 0) {
PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
@@ -180,7 +180,7 @@ struct sw_port {
uint16_t outstanding_releases __rte_cache_aligned;
uint16_t inflight_max; /* app requested max inflights for this port */
uint16_t inflight_credits; /* num credits this port has right now */
- uint8_t implicit_release; /* release events before dequeueing */
+ uint8_t implicit_release; /* release events before dequeuing */
uint16_t last_dequeue_burst_sz; /* how big the burst was */
uint64_t last_dequeue_ticks; /* used to track burst processing time */
@@ -1109,7 +1109,7 @@ xstats_tests(struct test *t)
NULL,
0);
- /* Verify that the resetable stats are reset, and others are not */
+ /* Verify that the resettable stats are reset, and others are not */
static const uint64_t queue_expected_zero[] = {
0 /* rx */,
0 /* tx */,
@@ -258,7 +258,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i].addr; i++) {
- /* TODO-errata - objerved that bufs may be null
+ /* TODO-errata - observed that bufs may be null
* i.e. first buffer is valid, remaining 6 buffers
* may be null.
*/
@@ -669,7 +669,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
break;
}
- /* Imsert it into an ordered linked list */
+ /* Insert it into an ordered linked list */
for (curr = &head; curr[0] != NULL; curr = curr[0]) {
if ((uintptr_t)node <= (uintptr_t)curr[0])
break;
@@ -705,7 +705,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
ret = octeontx_fpapf_aura_detach(gpool);
if (ret) {
- fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
+ fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
gpool, ret);
}
@@ -67,7 +67,7 @@
typedef void (*rx_user_meta_hook_fn)(struct rte_mbuf *mbuf,
const uint32_t *meta,
void *ext_user_data);
-/* TX hook poplulate *meta, with up to 20 bytes. meta_cnt
+/* TX hook populates *meta with up to 20 bytes. meta_cnt
* returns the number of uint32_t words populated, 0 to 5
*/
typedef void (*tx_user_meta_hook_fn)(const struct rte_mbuf *mbuf,
@@ -1423,7 +1423,7 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -1094,7 +1094,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
@@ -281,7 +281,7 @@ int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- /* VLAN proimisc bu defauld */
+ /* VLAN promisc by default */
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
@@ -1046,7 +1046,7 @@ static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
return 0;
}
-/*Distrubting fifo size */
+/*Distributing fifo size */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
unsigned int fifo_size;
@@ -284,7 +284,7 @@ static int axgbe_phy_reset(struct axgbe_port *pdata)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -641,7 +641,7 @@ struct axgbe_port {
unsigned int kr_redrv;
- /* Auto-negotiation atate machine support */
+ /* Auto-negotiation state machine support */
unsigned int an_int;
unsigned int an_status;
enum axgbe_an an_result;
@@ -347,7 +347,7 @@ static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
retry = 1;
again2:
- /* Read the specfied register */
+ /* Read the specified register */
i2c_op.cmd = AXGBE_I2C_CMD_READ;
i2c_op.target = target;
i2c_op.len = val_len;
@@ -1093,7 +1093,7 @@ static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
{
return 0;
/* Dummy API since there is no case to support
- * external phy devices registred through kerenl apis
+ * external phy devices registered through kernel apis
*/
}
@@ -11,7 +11,7 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
-/* Useful to avoid shifting for every descriptor prepration*/
+/* Useful to avoid shifting for every descriptor preparation*/
#define TX_DESC_CTRL_FLAGS 0xb000000000000000
#define TX_DESC_CTRL_FLAG_TMST 0x40000000
#define TX_FREE_BULK 8
@@ -926,7 +926,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
* block.
*
* RAMROD_CMD_ID_ETH_UPDATE
- * Used to update the state of the leading connection, usually to udpate
+ * Used to update the state of the leading connection, usually to update
* the RSS indirection table. Completes on the RCQ of the leading
* connection. (Not currently used under FreeBSD until OS support becomes
* available.)
@@ -941,7 +941,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
* the RCQ of the leading connection.
*
* RAMROD_CMD_ID_ETH_CFC_DEL
- * Used when tearing down a conneciton prior to driver unload. Completes
+ * Used when tearing down a connection prior to driver unload. Completes
* on the RCQ of the leading connection (since the current connection
* has been completely removed from controller memory).
*
@@ -1072,7 +1072,7 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
/*
* It's ok if the actual decrement is issued towards the memory
- * somewhere between the lock and unlock. Thus no more explict
+ * somewhere between the lock and unlock. Thus no more explicit
* memory barrier is needed.
*/
if (common) {
@@ -1190,7 +1190,7 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
drv_cmd = ECORE_Q_CMD_TERMINATE;
break;
@@ -1476,7 +1476,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
case BNX2X_RX_MODE_ALLMULTI_PROMISC:
case BNX2X_RX_MODE_PROMISC:
/*
- * According to deffinition of SI mode, iface in promisc mode
+ * According to definition of SI mode, iface in promisc mode
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
@@ -1944,7 +1944,7 @@ static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
/*
* Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
*/
static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
{
@@ -2043,7 +2043,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed successfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
@@ -2084,7 +2084,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
/*
* Prevent transactions to host from the functions on the
* engine that doesn't reset global blocks in case of global
- * attention once gloabl blocks are reset and gates are opened
+ * attention once global blocks are reset and gates are opened
* (the engine which leader will perform the recovery
* last).
*/
@@ -2101,7 +2101,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
/*
* At this stage no more interrupts will arrive so we may safely clean
- * the queue'able objects here in case they failed to get cleaned so far.
+ * the queueable objects here in case they failed to get cleaned so far.
*/
if (IS_PF(sc)) {
bnx2x_squeeze_objects(sc);
@@ -2151,7 +2151,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
}
/*
- * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
+ * Encapsulates an mbuf cluster into the tx bd chain and makes the memory
* visible to the controller.
*
* If an mbuf is submitted to this routine and cannot be given to the
@@ -2719,7 +2719,7 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
return val1 != 0;
}
-/* send load requrest to mcp and analyze response */
+/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
{
PMD_INIT_FUNC_TRACE(sc);
@@ -5325,7 +5325,7 @@ static void bnx2x_func_init(struct bnx2x_softc *sc, struct bnx2x_func_init_param
* sum of vn_min_rates.
* or
* 0 - if all the min_rates are 0.
- * In the later case fainess algorithm should be deactivated.
+ * In the latter case the fairness algorithm should be deactivated.
* If all min rates are not zero then those that are zeroes will be set to 1.
*/
static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)
@@ -6564,7 +6564,7 @@ bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
txq_init->fw_sb_id = fp->fw_sb_id;
/*
- * set the TSS leading client id for TX classfication to the
+ * set the TSS leading client id for TX classification to the
* leading RSS client id
*/
txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);
@@ -7634,8 +7634,8 @@ static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
}
/*
-* Walk the PCI capabiites list for the device to find what features are
-* supported. These capabilites may be enabled/disabled by firmware so it's
+* Walk the PCI capabilities list for the device to find what features are
+* supported. These capabilities may be enabled/disabled by firmware so it's
* best to walk the list rather than make assumptions.
*/
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
@@ -8425,7 +8425,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
} else {
sc->devinfo.int_block = INT_BLOCK_IGU;
-/* do not allow device reset during IGU info preocessing */
+/* do not allow device reset during IGU info processing */
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
@@ -9765,7 +9765,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
- /* get PCI capabilites */
+ /* get PCI capabilities */
bnx2x_probe_pci_caps(sc);
if (sc->devinfo.pcie_msix_cap_reg != 0) {
@@ -10284,7 +10284,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
* stay set)
* f. If this is VNIC 3 of a port then also init
* first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last enrty in the ILT.
+ * to the last entry in the ILT.
*
* Notes:
* Currently the PF error in the PGLC is non recoverable.
@@ -11090,7 +11090,7 @@ static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
/**
* bnx2x_pf_flr_clnup
* a. re-enable target read on the PF
- * b. poll cfc per function usgae counter
+ * b. poll cfc per function usage counter
* c. poll the qm perfunction usage counter
* d. poll the tm per function usage counter
* e. poll the tm per function scan-done indication
@@ -681,13 +681,13 @@ struct bnx2x_slowpath {
}; /* struct bnx2x_slowpath */
/*
- * Port specifc data structure.
+ * Port specific data structure.
*/
struct bnx2x_port {
/*
* Port Management Function (for 57711E only).
* When this field is set the driver instance is
- * responsible for managing port specifc
+ * responsible for managing port specific
* configurations such as handling link attentions.
*/
uint32_t pmf;
@@ -732,7 +732,7 @@ struct bnx2x_port {
/*
* MCP scratchpad address for port specific statistics.
- * The device is responsible for writing statistcss
+ * The device is responsible for writing statistics
* back to the MCP for use with management firmware such
* as UMP/NC-SI.
*/
@@ -937,8 +937,8 @@ struct bnx2x_devinfo {
* already registered for this port (which means that the user wants storage
* services).
* 2. During cnic-related load, to know if offload mode is already configured
- * in the HW or needs to be configrued. Since the transition from nic-mode to
- * offload-mode in HW causes traffic coruption, nic-mode is configured only
+ * in the HW or needs to be configured. Since the transition from nic-mode to
+ * offload-mode in HW causes traffic corruption, nic-mode is configured only
* in ports on which storage services where never requested.
*/
#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))
@@ -1358,7 +1358,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
/*
* Prepare the first stats ramrod (will be completed with
- * the counters equal to zero) - init counters to somethig different.
+ * the counters equal to zero) - init counters to something different.
*/
memset(&sc->fw_stats_data->storm_counters, 0xff,
sizeof(struct stats_counter));
@@ -314,7 +314,7 @@ struct bnx2x_eth_stats_old {
};
struct bnx2x_eth_q_stats_old {
- /* Fields to perserve over fw reset*/
+ /* Fields to preserve over fw reset*/
uint32_t total_unicast_bytes_received_hi;
uint32_t total_unicast_bytes_received_lo;
uint32_t total_broadcast_bytes_received_hi;
@@ -328,7 +328,7 @@ struct bnx2x_eth_q_stats_old {
uint32_t total_multicast_bytes_transmitted_hi;
uint32_t total_multicast_bytes_transmitted_lo;
- /* Fields to perserve last of */
+ /* Fields to preserve last of */
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_bytes_transmitted_hi;
@@ -73,7 +73,7 @@ bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
tl->length = length;
}
-/* Initiliaze header of the first tlv and clear mailbox*/
+/* Initialize header of the first tlv and clear mailbox*/
static void
bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
uint16_t type, uint16_t length)
@@ -241,7 +241,7 @@ struct vf_close_tlv {
uint8_t pad[2];
};
-/* rlease the VF's acquired resources */
+/* release the VF's acquired resources */
struct vf_release_tlv {
struct vf_first_tlv first_tlv;
uint16_t vf_id; /* for debug */
@@ -379,7 +379,7 @@
/* temporarily used for RTT */
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
-/* used for Host Coallescing */
+/* used for Host Coalescing */
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
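/* For illustration: a 1 ms coalescing timeout expressed in SDM timer
 * ticks is 1e-3 / SDM_TIMER_TICK_RESUL_CHIP = 1e-3 / 4e-6 = 250 ticks.
 */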
@@ -1062,7 +1062,7 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000
/* Secondary MBA configuration,
- * see mba_config for the fileds defination.
+ * see mba_config for the fields definition.
*/
uint32_t mba_config2;
@@ -1075,7 +1075,7 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEATURE_BOFM_CFGD_VEN 0x00080000
/* Secondary MBA configuration,
- * see mba_vlan_cfg for the fileds defination.
+ * see mba_vlan_cfg for the fields definition.
*/
uint32_t mba_vlan_cfg2;
@@ -1429,7 +1429,7 @@ struct extended_dev_info_shared_cfg { /* NVRAM OFFSET */
#define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000
/* Override Rx signal detect threshold when enabled the threshold
- * will be set staticaly
+ * will be set statically
*/
#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_MASK 0x00100000
#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_SHIFT 20
@@ -2189,9 +2189,9 @@ struct eee_remote_vals {
* elements on a per byte or word boundary.
*
* example: an array with 8 entries each 4 bit wide. This array will fit into
- * a single dword. The diagrmas below show the array order of the nibbles.
+ * a single dword. The diagrams below show the array order of the nibbles.
*
- * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
*
* | | | |
* 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
@@ -2519,17 +2519,17 @@ struct shmem_lfa {
};
/*
- * Used to suppoert NSCI get OS driver version
+ * Used to support NSCI get OS driver version
* On driver load the version value will be set
* On driver unload driver value of 0x0 will be set
*/
struct os_drv_ver {
#define DRV_VER_NOT_LOADED 0
- /*personalites orrder is importent */
+ /*personalities order is important */
#define DRV_PERS_ETHERNET 0
#define DRV_PERS_ISCSI 1
#define DRV_PERS_FCOE 2
- /*shmem2 struct is constatnt can't add more personalites here*/
+ /*shmem2 struct is constant can't add more personalities here*/
#define MAX_DRV_PERS 3
uint32_t versions[MAX_DRV_PERS];
};
@@ -2821,7 +2821,7 @@ struct shmem2_region {
/* Flag to the driver that PF's drv_info_host_addr buffer was read */
uint32_t mfw_drv_indication; /* Offset 0x19c */
- /* We use inidcation for each PF (0..3) */
+ /* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
union { /* For various OEMs */ /* Offset 0x1a0 */
@@ -6195,7 +6195,7 @@ struct hc_sb_data {
/*
- * Segment types for host coaslescing
+ * Segment types for host coalescing
*/
enum hc_segment {
HC_REGULAR_SEGMENT,
@@ -6242,7 +6242,7 @@ struct hc_status_block_data_e2 {
/*
- * IGU block operartion modes (in Everest2)
+ * IGU block operation modes (in Everest2)
*/
enum igu_mode {
HC_IGU_BC_MODE,
@@ -6508,7 +6508,7 @@ struct stats_query_header {
/*
- * Types of statistcis query entry
+ * Types of statistics query entry
*/
enum stats_query_type {
STATS_TYPE_QUEUE,
@@ -6542,7 +6542,7 @@ enum storm_id {
/*
- * Taffic types used in ETS and flow control algorithms
+ * Traffic types used in ETS and flow control algorithms
*/
enum traffic_type {
LLFC_TRAFFIC_TYPE_NW,
@@ -534,7 +534,7 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
}
- /* Validate number of tags suppoted by device */
+ /* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
val &= 0xFF;
@@ -714,7 +714,7 @@ static void ecore_ilt_client_init_op_ilt(struct bnx2x_softc *sc,
for (i = ilt_cli->start; i <= ilt_cli->end; i++)
ecore_ilt_line_init_op(sc, ilt, i, initop);
- /* init/clear the ILT boundries */
+ /* init/clear the ILT boundaries */
ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
}
@@ -765,7 +765,7 @@ static void ecore_ilt_init_client_psz(struct bnx2x_softc *sc, int cli_num,
/*
* called during init common stage, ilt clients should be initialized
- * prioir to calling this function
+ * prior to calling this function
*/
static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
{
@@ -19,7 +19,7 @@
#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1 << 3)
#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1 << 4)
#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1 << 1)
-/* [R 1] ATC initalization done */
+/* [R 1] ATC initialization done */
#define ATC_REG_ATC_INIT_DONE 0x1100bc
/* [RW 6] Interrupt mask register #0 read/write */
#define ATC_REG_ATC_INT_MASK 0x1101c8
@@ -56,7 +56,7 @@
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
/* [RW 10] Write client 0: Assert pause threshold. Not Functional */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
#define CCM_REG_CAM_OCCUP 0xd0188
@@ -456,7 +456,7 @@
#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
#define IGU_REG_PCI_PF_MSI_EN 0x130140
/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
- * pending; 1 = pending. Pendings means interrupt was asserted; and write
+ * pending; 1 = pending. Pending means interrupt was asserted; and write
* done was not received. Data valid only in addresses 0-4. all the rest are
* zero.
*/
@@ -1059,14 +1059,14 @@
/* [R 28] this field hold the last information that caused reserved
* attention. bits [19:0] - address; [22:20] function; [23] reserved;
* [27:24] the master that caused the attention - according to the following
- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
* dbu; 8 = dmae
*/
#define MISC_REG_GRC_RSV_ATTN 0xa3c0
/* [R 28] this field hold the last information that caused timeout
* attention. bits [19:0] - address; [22:20] function; [23] reserved;
* [27:24] the master that caused the attention - according to the following
- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
* dbu; 8 = dmae
*/
#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
@@ -1567,7 +1567,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -1672,7 +1672,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -1839,7 +1839,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -1926,7 +1926,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2306,7 +2306,7 @@
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
-/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
* priority in the command arbiter.
*/
#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
@@ -2366,7 +2366,7 @@
*/
#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
/* [R 11] Removed for E3 B0 - Port 0 threshold used by arbiter in 16 byte
- * lines used when pause not suppoterd.
+ * lines used when pause not supported.
*/
#define PBF_REG_P0_ARB_THRSH 0x1400e4
/* [R 11] Removed for E3 B0 - Current credit for port 0 in the tx port
@@ -3503,7 +3503,7 @@
* queues.
*/
#define QM_REG_OVFERROR 0x16805c
-/* [RC 6] the Q were the qverflow occurs */
+/* [RC 6] the Q where the overflow occurs */
#define QM_REG_OVFQNUM 0x168058
/* [R 16] Pause state for physical queues 15-0 */
#define QM_REG_PAUSESTATE0 0x168410
@@ -4890,7 +4890,7 @@
if set, generate pcie_err_attn output when this error is seen. WC \
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \
- (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
+ (1 << 3) /* Receive UR Status for Function 2. If set, generate \
pcie_err_attn output when this error is seen. WC */
#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \
(1 << 2) /* Completer Timeout Status Status for Function 2, if \
@@ -4986,7 +4986,7 @@
if set, generate pcie_err_attn output when this error is seen. WC \
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \
- (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
+ (1 << 3) /* Receive UR Status for Function 5. If set, generate \
pcie_err_attn output when this error is seen. WC */
#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \
(1 << 2) /* Completer Timeout Status Status for Function 5, if \
@@ -1338,7 +1338,7 @@ static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
if (rc != ECORE_SUCCESS) {
__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
- /** Calling function should not diffrentiate between this case
+ /** Calling function should not differentiate between this case
* and the case in which there is already a pending ramrod
*/
rc = ECORE_PENDING;
@@ -2246,7 +2246,7 @@ struct ecore_pending_mcast_cmd {
union {
ecore_list_t macs_head;
uint32_t macs_num; /* Needed for DEL command */
- int next_bin; /* Needed for RESTORE flow with aprox match */
+ int next_bin; /* Needed for RESTORE flow with approx match */
} data;
int done; /* set to TRUE, when the command has been handled,
@@ -3424,7 +3424,7 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
} else {
/*
- * CAM credit is equaly divided between all active functions
+ * CAM credit is equally divided between all active functions
* on the PATH.
*/
if (func_num > 0) {
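		/* Sketch of the even split described above; cam_sz (the
		 * total number of CAM lines on the PATH) is an assumed name:
		 *
		 *	credit_per_func = cam_sz / func_num;
		 */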
@@ -430,7 +430,7 @@ enum {
RAMROD_RESTORE,
/* Execute the next command now */
RAMROD_EXEC,
- /* Don't add a new command and continue execution of posponed
+ /* Don't add a new command and continue execution of postponed
* commands. If not set a new command will be added to the
* pending commands list.
*/
@@ -1173,7 +1173,7 @@ struct ecore_rss_config_obj {
/* Last configured indirection table */
uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
- /* flags for enabling 4-tupple hash on UDP */
+ /* flags for enabling 4-tuple hash on UDP */
uint8_t udp_rss_v4;
uint8_t udp_rss_v6;
@@ -1285,7 +1285,7 @@ enum ecore_q_type {
#define ECORE_MULTI_TX_COS_E3B0 3
#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
-/* DMAE channel to be used by FW for timesync workaroun. A driver that sends
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
* timesync-related ramrods must not use this DMAE command ID.
*/
#define FW_DMAE_CMD_ID 6
@@ -1460,7 +1460,7 @@ static void elink_ets_e3b0_pbf_disabled(const struct elink_params *params)
}
/******************************************************************************
* Description:
- * E3B0 disable will return basicly the values to init values.
+ * E3B0 disable will return basically the values to init values.
*.
******************************************************************************/
static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
@@ -1483,7 +1483,7 @@ static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
/******************************************************************************
* Description:
- * Disable will return basicly the values to init values.
+ * Disable will return basically the values to init values.
*
******************************************************************************/
elink_status_t elink_ets_disabled(struct elink_params *params,
@@ -1506,7 +1506,7 @@ elink_status_t elink_ets_disabled(struct elink_params *params,
/******************************************************************************
* Description
- * Set the COS mappimg to SP and BW until this point all the COS are not
+ * Set the COS mapping to SP and BW; until this point all the COS are not
* set as SP or BW.
******************************************************************************/
static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
@@ -1652,7 +1652,7 @@ static elink_status_t elink_ets_e3b0_get_total_bw(
}
ELINK_DEBUG_P0(sc,
"elink_ets_E3B0_config total BW should be 100");
- /* We can handle a case whre the BW isn't 100 this can happen
+ * We can handle a case where the BW isn't 100; this can happen
* if the TC are joined.
*/
}
@@ -2608,7 +2608,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
#ifdef ELINK_INCLUDE_EMUL
- /* for paladium */
+ /* for palladium */
if (CHIP_REV_IS_EMUL(sc)) {
/* Use lane 1 (of lanes 0-3) */
REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
@@ -2850,7 +2850,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
/* Set Time (based unit is 512 bit time) between automatic
* re-sending of PP packets amd enable automatic re-send of
- * Per-Priroity Packet as long as pp_gen is asserted and
+ * Per-Priority Packet as long as pp_gen is asserted and
* pp_disable is low.
*/
val = 0x8000;
@@ -3369,7 +3369,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
}
/**
- * elink_get_emac_base - retrive emac base address
+ * elink_get_emac_base - retrieve emac base address
*
* @bp: driver handle
* @mdc_mdio_access: access type
@@ -4518,7 +4518,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- /* Start KR2 work-around timer which handles BNX2X8073 link-parner */
+ /* Start KR2 work-around timer which handles BNX2X8073 link-partner */
params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
elink_update_link_attr(params, params->link_attr_sync);
}
@@ -7824,7 +7824,7 @@ elink_status_t elink_link_update(struct elink_params *params,
* hence its link is expected to be down
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
- * - DEFAULT should be overridden during initialiazation
+ * - DEFAULT should be overridden during initialization
*/
ELINK_DEBUG_P1(sc, "Invalid link indication"
" mpc=0x%x. DISABLING LINK !!!",
@@ -10991,7 +10991,7 @@ static elink_status_t elink_84858_cmd_hdlr(struct elink_phy *phy,
ELINK_DEBUG_P0(sc, "FW cmd failed.");
return ELINK_STATUS_ERROR;
}
- /* Step5: Once the command has completed, read the specficied DATA
+ /* Step5: Once the command has completed, read the specified DATA
* registers for any saved results for the command, if applicable
*/
@@ -3727,7 +3727,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
return -EINVAL;
}
@@ -52,7 +52,7 @@ tfp_send_msg_direct(struct bnxt *bp,
}
/**
- * Allocates zero'ed memory from the heap.
+ * Allocates zeroed memory from the heap.
*
* Returns success or failure code.
*/
@@ -150,7 +150,7 @@ tfp_msg_hwrm_oem_cmd(struct tf *tfp,
uint32_t max_flows);
/**
- * Allocates zero'ed memory from the heap.
+ * Allocates zeroed memory from the heap.
*
* NOTE: Also performs virt2phy address conversion by default thus is
* can be expensive to invoke.
@@ -20,7 +20,7 @@
/** Maximum number of LACP packets from one slave queued in TX ring. */
#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
/**
- * Timeouts deffinitions (5.4.4 in 802.1AX documentation).
+ * Timeout definitions (5.4.4 in 802.1AX documentation).
*/
#define BOND_8023AD_FAST_PERIODIC_MS 900
#define BOND_8023AD_SLOW_PERIODIC_MS 29000
@@ -139,7 +139,7 @@ struct bond_dev_private {
uint16_t slave_count; /**< Number of bonded slaves */
struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
- /**< Arary of bonded slaves details */
+ /**< Array of bonded slaves details */
struct mode8023ad_private mode4;
uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
@@ -243,7 +243,7 @@ record_default(struct port *port)
{
/* Record default parameters for partner. Partner admin parameters
* are not implemented so set them to arbitrary default (last known) and
- * mark actor that parner is in defaulted state. */
+ * mark actor that partner is in defaulted state. */
port->partner_state = STATE_LACP_ACTIVE;
ACTOR_STATE_SET(port, DEFAULTED);
}
@@ -300,7 +300,7 @@ rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
MODE4_DEBUG("LACP -> CURRENT\n");
BOND_PRINT_LACP(lacp);
/* Update selected flag. If partner parameters are defaulted assume they
- * are match. If not defaulted compare LACP actor with ports parner
+ * match. If not defaulted, compare LACP actor with the port's partner
* params. */
if (!ACTOR_STATE(port, DEFAULTED) &&
(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
@@ -399,16 +399,16 @@ periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
PARTNER_STATE(port, LACP_ACTIVE);
uint8_t is_partner_fast, was_partner_fast;
- /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */
+ /* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
timer_cancel(&port->periodic_timer);
timer_force_expired(&port->tx_machine_timer);
SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
- SM_FLAG(port, BEGIN) ? "begind " : "",
+ SM_FLAG(port, BEGIN) ? "begin " : "",
SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
- active ? "LACP active " : "LACP pasive ");
+ active ? "LACP active " : "LACP passive ");
return;
}
@@ -495,10 +495,10 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
!PARTNER_STATE(port, SYNCHRONIZATION)) {
/* If in COLLECTING or DISTRIBUTING state and partner becomes out of
- * sync transit to ATACHED state. */
+ * sync transit to ATTACHED state. */
ACTOR_STATE_CLR(port, DISTRIBUTING);
ACTOR_STATE_CLR(port, COLLECTING);
- /* Clear actor sync to activate transit ATACHED in condition bellow */
+ /* Clear actor sync to activate transit ATTACHED in condition below */
ACTOR_STATE_CLR(port, SYNCHRONIZATION);
MODE4_DEBUG("Out of sync -> ATTACHED\n");
}
@@ -696,7 +696,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
/* Search for aggregator suitable for this port */
for (i = 0; i < slaves_count; ++i) {
agg = &bond_mode_8023ad_ports[slaves[i]];
- /* Skip ports that are not aggreagators */
+ /* Skip ports that are not aggregators */
if (agg->aggregator_port_id != slaves[i])
continue;
@@ -921,7 +921,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
SM_FLAG_SET(port, BEGIN);
- /* LACP is disabled on half duples or link is down */
+ /* LACP is disabled on half duplex or link is down */
if (SM_FLAG(port, LACP_ENABLED)) {
/* If port was enabled set it to BEGIN state */
SM_FLAG_CLR(port, LACP_ENABLED);
@@ -1069,7 +1069,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
port->sm_flags = SM_FLAGS_BEGIN;
- /* use this port as agregator */
+ /* use this port as aggregator */
port->aggregator_port_id = slave_id;
if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {
@@ -68,7 +68,7 @@ struct port_params {
struct rte_ether_addr system;
/**< System ID - Slave MAC address, same as bonding MAC address */
uint16_t key;
- /**< Speed information (implementation dependednt) and duplex. */
+ /**< Speed information (implementation dependent) and duplex. */
uint16_t port_priority;
/**< Priority of this (unused in current implementation) */
uint16_t port_number;
@@ -317,7 +317,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);
* @param port_id Bonding device id
*
* @return
- * agregator mode on success, negative value otherwise
+ * aggregator mode on success, negative value otherwise
*/
int
rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
@@ -96,7 +96,7 @@ bond_mode_alb_arp_xmit(struct rte_ether_hdr *eth_h, uint16_t offset,
* @param internals Bonding data.
*
* @return
- * Index of slawe on which packet should be sent.
+ * Index of slave on which packet should be sent.
*/
uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
@@ -375,7 +375,7 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
* value. Thus, the new internal value of default Rx queue offloads
* has to be masked by rx_queue_offload_capa to make sure that only
* commonly supported offloads are preserved from both the previous
- * value and the value being inhereted from the new slave device.
+ * value and the value being inherited from the new slave device.
*/
rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
internals->rx_queue_offload_capa;
@@ -413,7 +413,7 @@ eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
* value. Thus, the new internal value of default Tx queue offloads
* has to be masked by tx_queue_offload_capa to make sure that only
* commonly supported offloads are preserved from both the previous
- * value and the value being inhereted from the new slave device.
+ * value and the value being inherited from the new slave device.
*/
txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
internals->tx_queue_offload_capa;
@@ -53,7 +53,7 @@ struct cn10k_outb_priv_data {
void *userdata;
/* Rlen computation data */
struct cnxk_ipsec_outb_rlens rlens;
- /* Back pinter to eth sec session */
+ /* Back pointer to eth sec session */
struct cnxk_eth_sec_sess *eth_sec;
/* SA index */
uint32_t sa_idx;
@@ -736,7 +736,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
/* Retrieving the default desc values */
lmt[off] = cmd[2];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
@@ -745,7 +745,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual tx tstamp registered
* address.
*/
send_mem->w0.subdc = NIX_SUBDC_MEM;
@@ -2254,7 +2254,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- /* Tx ol_flag for timestam. */
+ /* Tx ol_flag for timestamp. */
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
@@ -304,7 +304,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
/* Retrieving the default desc values */
cmd[off] = send_mem_desc[6];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
@@ -313,7 +313,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual tx tstamp registered
* address.
*/
send_mem->w0.cn9k.alg =
@@ -1531,7 +1531,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- /* Tx ol_flag for timestam. */
+ /* Tx ol_flag for timestamp. */
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
@@ -12,7 +12,7 @@ cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
/* This API returns the raw PTP HI clock value. Since LFs do not
* have direct access to PTP registers and it requires mbox msg
* to AF for this value. In fastpath reading this value for every
- * packet (which involes mbox call) becomes very expensive, hence
+ * packet (which involves mbox call) becomes very expensive, hence
* we should be able to derive PTP HI clock value from tsc by
* using freq_mult and clk_delta calculated during configure stage.
*/
@@ -1378,7 +1378,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,
}
/*
- * @ret : > 0 filter destroyed succsesfully
+ * @ret : > 0 filter destroyed successfully
* < 0 error destroying filter
* == 1 filter not active / not found
*/
@@ -44,7 +44,7 @@ static void size_nports_qsets(struct adapter *adapter)
*/
pmask_nports = hweight32(adapter->params.vfres.pmask);
if (pmask_nports < adapter->params.nports) {
- dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
" virtual interfaces; limited by Port Access Rights"
" mask %#x\n", pmask_nports, adapter->params.nports,
adapter->params.vfres.pmask);
@@ -211,7 +211,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
* @fl: the Free List
*
* Tests specified Free List to see whether the number of buffers
- * available to the hardware has falled below our "starvation"
+ * available to the hardware has fallen below our "starvation"
* threshold.
*/
static inline bool fl_starving(const struct adapter *adapter,
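/* A minimal sketch of that test (field names assumed): buffers still
 * usable by hardware are those posted minus credits not yet pushed,
 * compared against the starvation threshold:
 *
 *	static inline bool fl_starving_sketch(const struct adapter *adapter,
 *					      const struct sge_fl *fl)
 *	{
 *		const struct sge *s = &adapter->sge;
 *
 *		return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 *	}
 */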
@@ -678,7 +678,7 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
- * Ring the doorbel for a Tx queue.
+ * Ring the doorbell for a Tx queue.
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
@@ -877,7 +877,7 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
}
/**
- * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not
+ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
* @txq: tx queue where the mbuf is sent
* @mbuf: mbuf to be sent
* @nflits: return value for number of flits needed
@@ -1846,7 +1846,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
* of the Egress Queue Unit and at least 2 Egress Units larger
- * than the SGE's Egress Congrestion Threshold
+ * than the SGE's Egress Congestion Threshold
* (fl_starve_thres - 1).
*/
if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
@@ -1030,7 +1030,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
QM_FQCTRL_CTXASTASHING |
QM_FQCTRL_PREFERINCACHE;
opts.fqd.context_a.stashing.exclusive = 0;
- /* In muticore scenario stashing becomes a bottleneck on LS1046.
+ /* In a multicore scenario, stashing becomes a bottleneck on LS1046.
* So do not enable stashing in this case
*/
if (dpaa_svr_family != SVR_LS1046A_FAMILY)
@@ -1866,7 +1866,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->name = dpaa_device->name;
- /* save fman_if & cfg in the interface struture */
+ /* save fman_if & cfg in the interface structure */
eth_dev->process_private = fman_intf;
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
@@ -2169,7 +2169,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
dpaa_push_mode_max_queue = 0;
- /* if push mode queues to be enabled. Currenly we are allowing
+ /* Check if push mode queues are to be enabled. Currently we are allowing
* only one queue per thread.
*/
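	/* Illustrative usage: the queue count can be set from the
	 * environment before the application starts, e.g. (app name assumed):
	 *
	 *	DPAA_PUSH_QUEUES_NUMBER=4 ./your-dpdk-app ...
	 */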
if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
@@ -600,8 +600,8 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
/* In case of LS1046, annotation stashing is disabled due to L2 cache
- * being bottleneck in case of multicore scanario for this platform.
- * So we prefetch the annoation beforehand, so that it is available
+ * being a bottleneck in the multicore scenario for this platform.
+ * So we prefetch the annotation beforehand, so that it is available
* in cache when accessed.
*/
rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
@@ -176,7 +176,7 @@ typedef struct t_fm_prs_result {
#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
/**< Header error was identified during parsing */
#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
- /**< Frame parsed beyind 256 first bytes */
+ /**< Frame parsed beyond 256 first bytes */
#define FM_FD_TX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
FM_FD_ERR_LENGTH | \
@@ -276,7 +276,7 @@ typedef struct ioc_fm_pcd_counters_params_t {
} ioc_fm_pcd_counters_params_t;
/*
- * @Description structure for FM exception definitios
+ * @Description structure for FM exception definitions
*/
typedef struct ioc_fm_pcd_exception_params_t {
ioc_fm_pcd_exceptions exception; /**< The requested exception */
@@ -883,7 +883,7 @@ typedef enum ioc_fm_pcd_manip_hdr_rmv_specific_l2 {
e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
e_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS,
- /**< MPLS and Ethernet/802.3 MAC header unitl the header
+ /**< MPLS and Ethernet/802.3 MAC header until the header
* which follows the MPLS header
*/
e_IOC_FM_PCD_MANIP_HDR_RMV_MPLS
@@ -3293,7 +3293,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
/*
* @Function fm_pcd_net_env_characteristics_delete
*
- * @Description Deletes a set of Network Environment Charecteristics.
+ * @Description Deletes a set of Network Environment Characteristics.
*
* @Param[in] ioc_fm_obj_t The id of a Network Environment object.
*
@@ -3493,7 +3493,7 @@ typedef struct ioc_fm_pcd_cc_tbl_get_stats_t {
* @Return 0 on success; Error code otherwise.
*
* @Cautions Allowed only following fm_pcd_match_table_set() not only of
- * the relevnt node but also the node that points to this node.
+ * the relevant node but also the node that points to this node.
*/
#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE \
_IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), \
@@ -498,7 +498,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {
/**< Number of bytes from beginning of packet to start parsing
*/
ioc_net_header_type first_prs_hdr;
- /**< The type of the first header axpected at 'parsing_offset'
+ /**< The type of the first header expected at 'parsing_offset'
*/
bool include_in_prs_statistics;
/**< TRUE to include this port in the parser statistics */
@@ -524,7 +524,7 @@ typedef struct ioc_fm_port_pcd_prs_params_t {
} ioc_fm_port_pcd_prs_params_t;
/*
- * @Description A structure for defining coarse alassification parameters
+ * @Description A structure for defining coarse classification parameters
* (Must match t_fm_portPcdCcParams defined in fm_port_ext.h)
*/
typedef struct ioc_fm_port_pcd_cc_params_t {
@@ -602,7 +602,7 @@ typedef struct ioc_fm_pcd_prs_start_t {
/**< Number of bytes from beginning of packet to start parsing
*/
ioc_net_header_type first_prs_hdr;
- /**< The type of the first header axpected at 'parsing_offset'
+ /**< The type of the first header expected at 'parsing_offset'
*/
} ioc_fm_pcd_prs_start_t;
@@ -1356,7 +1356,7 @@ typedef uint32_t fm_port_frame_err_select_t;
#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
/**< Header error was identified during parsing */
#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
- /**< Frame parsed beyind 256 first bytes */
+ /**< Frame parsed beyond 256 first bytes */
#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
/**< FPM Frame Processing Timeout Exceeded */
/* @} */
@@ -1390,7 +1390,7 @@ typedef void (t_fm_port_exception_callback) (t_handle h_app,
* @Param[in] length length of received data
* @Param[in] status receive status and errors
* @Param[in] position position of buffer in frame
- * @Param[in] h_buf_context A handle of the user acossiated with this buffer
+ * @Param[in] h_buf_context A handle of the user associated with this buffer
*
* @Retval e_RX_STORE_RESPONSE_CONTINUE
* order the driver to continue Rx operation for all ready data.
@@ -1414,7 +1414,7 @@ typedef e_rx_store_response(t_fm_port_im_rx_store_callback) (t_handle h_app,
* @Param[in] p_data A pointer to data received
* @Param[in] status transmit status and errors
* @Param[in] last_buffer is last buffer in frame
- * @Param[in] h_buf_context A handle of the user acossiated with this buffer
+ * @Param[in] h_buf_context A handle of the user associated with this buffer
*/
typedef void (t_fm_port_im_tx_conf_callback) (t_handle h_app,
uint8_t *p_data,
@@ -2585,7 +2585,7 @@ typedef struct t_fm_port_congestion_grps {
bool pfc_prio_enable[FM_NUM_CONG_GRPS][FM_MAX_PFC_PRIO];
/**< a matrix that represents the map between the CG ids
* defined in 'congestion_grps_to_consider' to the
- * priorties mapping array.
+ * priorities mapping array.
*/
} t_fm_port_congestion_grps;
@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
- /* VLAN Filter not avaialble */
+ /* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
return -ENOTSUP;
@@ -916,7 +916,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
cong_notif_cfg.threshold_entry = nb_tx_desc;
/* Notify that the queue is not congested when the data in
- * the queue is below this thershold.(90% of value)
+ * the queue is below this threshold. (90% of value)
*/
cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
cong_notif_cfg.message_ctx = 0;
@@ -1058,7 +1058,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
* Dpaa2 link Interrupt handler
*
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -2236,7 +2236,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
ocfg.oa = 1;
/* Late arrival window size disabled */
ocfg.olws = 0;
- /* ORL resource exhaustaion advance NESN disabled */
+ /* ORL resource exhaustion advance NESN disabled */
ocfg.oeane = 0;
/* Loose ordering enabled */
ocfg.oloe = 1;
@@ -2720,13 +2720,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
- /*Init fields w.r.t. classficaition*/
+ /*Init fields w.r.t. classification*/
memset(&priv->extract.qos_key_extract, 0,
sizeof(struct dpaa2_key_extract));
priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
if (!priv->extract.qos_extract_param) {
DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
- " classificaiton ", ret);
+ " classification ", ret);
goto init_err;
}
priv->extract.qos_key_extract.key_info.ipv4_src_offset =
@@ -2744,7 +2744,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->extract.tc_extract_param[i] =
(size_t)rte_malloc(NULL, 256, 64);
if (!priv->extract.tc_extract_param[i]) {
- DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
+ DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
ret);
goto init_err;
}
@@ -117,7 +117,7 @@ extern int dpaa2_timestamp_dynfield_offset;
#define DPAA2_FLOW_MAX_KEY_SIZE 16
-/*Externaly defined*/
+/*Externally defined*/
extern const struct rte_flow_ops dpaa2_flow_ops;
extern const struct rte_tm_ops dpaa2_tm_ops;
@@ -1451,7 +1451,7 @@ dpaa2_configure_flow_generic_ip(
flow, pattern, &local_cfg,
device_configured, group);
if (ret) {
- DPAA2_PMD_ERR("IP discrimation failed!");
+ DPAA2_PMD_ERR("IP discrimination failed!");
return -1;
}
@@ -3349,7 +3349,7 @@ dpaa2_flow_verify_action(
(actions[j].conf);
if (rss_conf->queue_num > priv->dist_queues) {
DPAA2_PMD_ERR(
- "RSS number exceeds the distrbution size");
+ "RSS number exceeds the distribution size");
return -ENOTSUP;
}
for (i = 0; i < (int)rss_conf->queue_num; i++) {
@@ -3596,7 +3596,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
qos_cfg.keep_entries = true;
qos_cfg.key_cfg_iova =
(size_t)priv->extract.qos_extract_param;
- /* QoS table is effecitive for multiple TCs.*/
+ /* QoS table is effective for multiple TCs.*/
if (priv->num_rx_tc > 1) {
ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
priv->token, &qos_cfg);
@@ -3655,7 +3655,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
0, 0);
if (ret < 0) {
DPAA2_PMD_ERR(
- "Error in addnig entry to QoS table(%d)", ret);
+ "Error in adding entry to QoS table(%d)", ret);
return ret;
}
}
@@ -95,7 +95,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
/* Currently taking only IP protocol as an extract type.
- * This can be exended to other fields using pattern->type.
+ * This can be extended to other fields using pattern->type.
*/
memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
@@ -714,7 +714,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
@@ -1510,7 +1510,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (*dpaa2_seqn(*bufs)) {
/* Use only queue 0 for Tx in case of atomic/
* ordered packets as packets can get unordered
- * when being tranmitted out from the interface
+ * when being transmitted out from the interface
*/
dpaa2_set_enqueue_descriptor(order_sendq,
(*bufs),
@@ -1738,7 +1738,7 @@ dpaa2_dev_loopback_rx(void *queue,
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
@@ -93,7 +93,7 @@ struct fsl_mc_io;
*/
#define DPNI_OPT_OPR_PER_TC 0x000080
/**
- * All Tx traffic classes will use a single sender (ignore num_queueus for tx)
+ * All Tx traffic classes will use a single sender (ignore num_queues for tx)
*/
#define DPNI_OPT_SINGLE_SENDER 0x000100
/**
@@ -617,7 +617,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
* @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all
* frames whose enqueue was rejected
* @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected
- * @page_4: congestion point drops for seleted TC
+ * @page_4: congestion point drops for selected TC
* @page_4.cgr_reject_frames: number of rejected frames due to congestion point
* @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
* @page_5: policer statistics per TC
@@ -1417,7 +1417,7 @@ int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
* dpkg_prepare_key_cfg()
* @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
* '0' to use the 'default_tc' in such cases
- * @keep_entries: if set to one will not delele existing table entries. This
+ * @keep_entries: if set to one will not delete existing table entries. This
* option will work properly only for dpni objects created with
* DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
* be compatible with new key composition rule.
@@ -1516,7 +1516,7 @@ int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
* @flow_id: Identifies the Rx queue used for matching traffic. Supported
* values are in range 0 to num_queue-1.
* @redirect_obj_token: token that identifies the object where frame is
- * redirected when this rule is hit. This paraneter is used only when one of the
+ * redirected when this rule is hit. This parameter is used only when one of the
* flags DPNI_FS_OPT_REDIRECT_TO_DPNI_RX or DPNI_FS_OPT_REDIRECT_TO_DPNI_TX is
* set.
* The token is obtained using dpni_open() API call. The object must stay
@@ -1797,7 +1797,7 @@ int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
struct dpni_load_ss_cfg *cfg);
/**
- * dpni_eanble_sw_sequence() - Enables a software sequence in the parser
+ * dpni_enable_sw_sequence() - Enables a software sequence in the parser
* profile
* corresponding to the ingress or egress of the DPNI.
* @mc_io: Pointer to MC portal's I/O object
@@ -103,7 +103,7 @@
* Maximum number of Ring Descriptors.
*
* Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
+ * descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
#define E1000_MIN_RING_DESC 32
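Since an e1000 descriptor is 16 bytes, the 128-byte rule above reduces to requiring a multiple of 8 descriptors. A sketch of the check, with the descriptor size hard-coded as an assumption instead of taking sizeof() on the real struct:

```c
#include <stdbool.h>
#include <stdint.h>

#define DESC_SIZE 16u /* assumed sizeof(struct e1000_rx_desc) */

/* RDLEN/TDLEN must be a multiple of 128 bytes, so the ring size in
 * descriptors must satisfy (nb_desc * DESC_SIZE) % 128 == 0. */
static bool ring_size_valid(uint16_t nb_desc)
{
	return (nb_desc * DESC_SIZE) % 128u == 0;
}
/* ring_size_valid(32) and ring_size_valid(512) hold;
 * ring_size_valid(100) fails: 100 * 16 = 1600 is not a multiple of 128. */
```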
@@ -252,7 +252,7 @@ struct igb_rte_flow_rss_conf {
};
/*
- * Structure to store filters'info.
+ * Structure to store filters' info.
*/
struct e1000_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
@@ -1058,8 +1058,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
- * Unfortunatelly, all these nics have just one TX context.
- * So we have few choises for TX:
+ * Unfortunately, all these nics have just one TX context.
+ * So we have a few choices for TX:
* - Use just one TX queue.
* - Allow cksum offload only for one TX queue.
* - Don't allow TX cksum offload at all.
@@ -1068,7 +1068,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
* (Multiple Receive Queues are mutually exclusive with UDP
* fragmentation and are not supported when a legacy receive
* descriptor format is used).
- * Which means separate RX routinies - as legacy nics (82540, 82545)
+ * Which means separate RX routines - as legacy nics (82540, 82545)
* don't support extended RXD.
* To avoid it we support just one RX queue for now (no RSS).
*/
@@ -1558,7 +1558,7 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)
}
/*
- * It executes link_update after knowing an interrupt is prsent.
+ * It executes link_update after knowing an interrupt is present.
*
* @param dev
* Pointer to struct rte_eth_dev.
@@ -1616,7 +1616,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -141,7 +141,7 @@ union em_vlan_macip {
struct em_ctx_info {
uint64_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask */
- union em_vlan_macip hdrlen; /**< L2 and L3 header lenghts */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
/**
@@ -829,7 +829,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
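The "minus 1" is the whole trick: this hardware reads RDT == RDH as a ring it must stop filling, so software always leaves one slot of slack when handing descriptors back. A sketch of the tail computation the comment describes (names assumed, not the driver's):

```c
#include <stdint.h>

/* Compute the tail value to write back to hardware: one descriptor
 * behind the next index software will process, wrapping at the ring
 * size, so RDT can never become equal to RDH. */
static uint16_t rx_tail_to_write(uint16_t next_rx_id, uint16_t nb_rx_desc)
{
	return (uint16_t)(next_rx_id == 0 ? nb_rx_desc - 1 : next_rx_id - 1);
}
```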
@@ -1074,7 +1074,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
@@ -1149,7 +1149,7 @@ eth_igb_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = igb_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
@@ -1265,7 +1265,7 @@ eth_igb_start(struct rte_eth_dev *dev)
}
}
- /* confiugre msix for rx interrupt */
+ /* configure msix for rx interrupt */
eth_igb_configure_msix_intr(dev);
/* Configure for OS presence */
@@ -2819,7 +2819,7 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
}
/*
- * It executes link_update after knowing an interrupt is prsent.
+ * It executes link_update after knowing an interrupt is present.
*
* @param dev
* Pointer to struct rte_eth_dev.
@@ -2889,7 +2889,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -3787,7 +3787,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be added.
+ * ntuple_filter: pointer to the filter that will be added.
*
* @return
* - On success, zero.
@@ -3868,7 +3868,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev,
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be removed.
+ * ntuple_filter: pointer to the filter that will be removed.
*
* @return
* - On success, zero.
@@ -4226,7 +4226,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be added.
+ * ntuple_filter: pointer to the filter that will be added.
*
* @return
* - On success, zero.
@@ -4313,7 +4313,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be removed.
+ * ntuple_filter: pointer to the filter that will be removed.
*
* @return
* - On success, zero.
@@ -4831,7 +4831,7 @@ igb_timesync_disable(struct rte_eth_dev *dev)
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
return 0;
@@ -57,7 +57,7 @@ struct igb_flex_filter_list igb_filter_flex_list;
struct igb_rss_filter_list igb_filter_rss_list;
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
@@ -1608,7 +1608,7 @@ igb_flow_create(struct rte_eth_dev *dev,
/**
* Check if the flow rule is supported by igb.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * the HW, because there may not be enough room for the rule.
*/
static int
@@ -155,7 +155,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
else
E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
rah = E1000_READ_REG(hw, E1000_RAH(0));
rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
E1000_WRITE_REG(hw, E1000_RAH(0), rah);
@@ -150,7 +150,7 @@ union igb_tx_offload {
(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
/**
- * Strucutre to check if new context need be built
+ * Structure to check if a new context needs to be built
*/
struct igb_advctx_info {
uint64_t flags; /**< ol_flags related to context build. */
@@ -967,7 +967,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
@@ -1229,7 +1229,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
@@ -1252,7 +1252,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Maximum number of Ring Descriptors.
*
* Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
- * desscriptors should meet the following condition:
+ * descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
@@ -1350,7 +1350,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
sw_ring[tx_id].last_id = tx_id;
}
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
} while (tx_id != tx_next);
@@ -1383,7 +1383,7 @@ igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
/* Walk the list and find the next mbuf, if any. */
do {
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
if (sw_ring[tx_id].mbuf)
@@ -2146,7 +2146,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
igb_rss_disable(dev);
- /* RCTL: eanble VLAN filter */
+ /* RCTL: enable VLAN filter */
rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl |= E1000_RCTL_VFE;
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1408,7 +1408,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
++rxq->rx_stats.refill_partial;
}
- /* When we submitted free recources to device... */
+ /* When we submitted free resources to device... */
if (likely(i > 0)) {
/* ...let HW know that it can fill buffers with data. */
ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -42,7 +42,7 @@
/* While processing submitted and completed descriptors (rx and tx path
* respectively) in a loop it is desired to:
- * - perform batch submissions while populating sumbissmion queue
+ * - perform batch submissions while populating submission queue
* - avoid blocking transmission of other packets during cleanup phase
* Hence the utilization ratio of 1/8 of a queue size or max value if the size
* of the ring is very big - like 8k Rx rings.
@@ -12,7 +12,7 @@
#define RX_BD_CR ((ushort)0x0004) /* CRC or Frame error */
#define RX_BD_SH ((ushort)0x0008) /* Reserved */
#define RX_BD_NO ((ushort)0x0010) /* Rcvd non-octet aligned frame */
-#define RX_BD_LG ((ushort)0x0020) /* Rcvd frame length voilation */
+#define RX_BD_LG ((ushort)0x0020) /* Rcvd frame length violation */
#define RX_BD_FIRST ((ushort)0x0400) /* Reserved */
#define RX_BD_LAST ((ushort)0x0800) /* last buffer in the frame */
#define RX_BD_INT 0x00800000
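Flags like these are normally folded into one error mask so the receive path can reject a bad buffer descriptor with a single test. A hedged sketch using only the bits defined above:

```c
#include <stdbool.h>

typedef unsigned short ushort;

#define RX_BD_CR ((ushort)0x0004) /* CRC or frame error */
#define RX_BD_NO ((ushort)0x0010) /* non-octet aligned frame */
#define RX_BD_LG ((ushort)0x0020) /* frame length violation */

#define RX_BD_ERRORS (RX_BD_CR | RX_BD_NO | RX_BD_LG)

/* Sketch: a received BD is dropped if any error bit is set. */
static bool rx_bd_has_error(ushort status)
{
	return (status & RX_BD_ERRORS) != 0;
}
```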
@@ -405,7 +405,7 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg)
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "IPv4 exact match mask");
@@ -443,7 +443,7 @@ enic_copy_item_udp_v1(struct copy_item_args *arg)
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "UDP exact match mask");
@@ -482,7 +482,7 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg)
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "TCP exact match mask");
@@ -1044,14 +1044,14 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
}
/**
- * Build the intenal enic filter structure from the provided pattern. The
+ * Build the internal enic filter structure from the provided pattern. The
* pattern is validated as the items are copied.
*
* @param pattern[in]
* @param items_info[in]
* Info about this NICs item support, like valid previous items.
* @param enic_filter[out]
- * NIC specfilc filters derived from the pattern.
+ * NIC specific filters derived from the pattern.
* @param error[out]
*/
static int
@@ -1123,12 +1123,12 @@ enic_copy_filter(const struct rte_flow_item pattern[],
}
/**
- * Build the intenal version 1 NIC action structure from the provided pattern.
+ * Build the internal version 1 NIC action structure from the provided pattern.
* The pattern is validated as the items are copied.
*
* @param actions[in]
* @param enic_action[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
@@ -1170,12 +1170,12 @@ enic_copy_action_v1(__rte_unused struct enic *enic,
}
/**
- * Build the intenal version 2 NIC action structure from the provided pattern.
+ * Build the internal version 2 NIC action structure from the provided pattern.
* The pattern is validated as the items are copied.
*
* @param actions[in]
* @param enic_action[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
@@ -721,7 +721,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)
}
/* NIC does not support GTP tunnels. No Items are allowed after this.
- * This prevents the specificaiton of further items.
+ * This prevents the specification of further items.
*/
arg->header_level = 0;
@@ -733,7 +733,7 @@ enic_fm_copy_item_gtp(struct copy_item_args *arg)
/*
* Use the raw L4 buffer to match GTP as fm_header_set does not have
- * GTP header. UDP dst port must be specifiec. Using the raw buffer
+ * GTP header. UDP dst port must be specified. Using the raw buffer
* does not affect such UDP item, since we skip UDP in the raw buffer.
*/
fm_data->fk_header_select |= FKH_L4RAW;
@@ -1846,7 +1846,7 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
/* Remove trailing comma */
if (buf[0])
*(bp - 1) = '\0';
- ENICPMD_LOG(DEBUG, " Acions: %s", buf);
+ ENICPMD_LOG(DEBUG, " Actions: %s", buf);
}
static int
@@ -2364,7 +2364,7 @@ enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
if (ret < 0 && ret != -ENOENT)
return rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "enic: rte_hash_lookup(aciton)");
+ NULL, "enic: rte_hash_lookup(action)");
if (ret == -ENOENT) {
/* Allocate a new action on the NIC. */
@@ -2435,7 +2435,7 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
ENICPMD_FUNC_TRACE();
- /* Get or create an aciton handle. */
+ /* Get or create an action handle. */
ret = enic_action_handle_get(fm, action_in, error, &ah);
if (ret)
return ret;
@@ -1137,7 +1137,7 @@ int enic_disable(struct enic *enic)
}
/* If we were using interrupts, set the interrupt vector to -1
- * to disable interrupts. We are not disabling link notifcations,
+ * to disable interrupts. We are not disabling link notifications,
* though, as we want the polling of link status to continue working.
*/
if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
@@ -653,7 +653,7 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
* The app should not send oversized
* packets. tx_pkt_prepare includes a check as
* well. But some apps ignore the device max size and
- * tx_pkt_prepare. Oversized packets cause WQ errrors
+ * tx_pkt_prepare. Oversized packets cause WQ errors
* and the NIC ends up disabling the whole WQ. So
* truncate packets..
*/
@@ -44,7 +44,7 @@
#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
/*
- * byte aligment for HW RX data buffer
+ * byte alignment for HW RX data buffer
* Datasheet requires RX buffer addresses shall either be 512-byte aligned or
* be 8-byte aligned but without crossing host memory pages (4KB alignment
* boundaries). Satisfy first option.
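Of the two datasheet options, the driver takes the 512-byte alignment, which is a single mask test on the DMA address. A sketch of that check (helper name assumed):

```c
#include <stdbool.h>
#include <stdint.h>

/* The datasheet allows either 512-byte-aligned RX buffer addresses,
 * or 8-byte alignment that never crosses a 4 KB page. The driver
 * satisfies the first option, which is this one-line test: */
static bool rx_buf_addr_ok(uint64_t dma_addr)
{
	return (dma_addr & 511u) == 0; /* 512-byte aligned */
}
```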
@@ -290,7 +290,7 @@ rx_queue_free(struct fm10k_rx_queue *q)
}
/*
- * disable RX queue, wait unitl HW finished necessary flush operation
+ * disable RX queue, wait until HW finished necessary flush operation
*/
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
@@ -379,7 +379,7 @@ tx_queue_free(struct fm10k_tx_queue *q)
}
/*
- * disable TX queue, wait unitl HW finished necessary flush operation
+ * disable TX queue, wait until HW finished necessary flush operation
*/
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
@@ -453,7 +453,7 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
@@ -2553,7 +2553,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -2676,7 +2676,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -3034,7 +3034,7 @@ fm10k_params_init(struct rte_eth_dev *dev)
struct fm10k_dev_info *info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
- /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
+ /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
* works, assume we have maximum bandwidth.
* @todo - fix bus info
@@ -212,7 +212,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
+ /* without rx ol_flags, no VP flag report */
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
@@ -239,7 +239,7 @@ fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
- /* data_off will be ajusted after new mbuf allocated for 512-byte
+ /* data_off will be adjusted after new mbuf allocated for 512-byte
* alignment.
*/
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
@@ -410,7 +410,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
return 0;
- /* Vecotr RX will process 4 packets at a time, strip the unaligned
+ /* Vector RX will process 4 packets at a time, strip the unaligned
* tails in case it's not multiple of 4.
*/
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
@@ -481,7 +481,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
#if defined(RTE_ARCH_X86_64)
- /* B.1 load 2 64 bit mbuf poitns */
+		/* B.1 load 2 64 bit mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
#endif
@@ -573,7 +573,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
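The popcount works because by this point staterr carries one bit per descriptor in the 4-wide loop, set only where the hardware's DD (descriptor done) bit was seen; counting set bits counts finished packets, and any count below 4 means the ring ran dry and the loop exits. A scalar sketch of the same bookkeeping, without the SSE packing (names assumed):

```c
#include <stdint.h>

#define DESCS_PER_LOOP 4
#define DD_BIT 0x1u /* assumed position of the descriptor-done bit */

/* Scalar sketch of the vector completion count: gather one bit per
 * descriptor whose DD bit is set, then popcount the mask. */
static unsigned int count_done_descs(const uint32_t staterr[DESCS_PER_LOOP])
{
	uint64_t mask = 0;
	int i;

	for (i = 0; i < DESCS_PER_LOOP; i++)
		if (staterr[i] & DD_BIT)
			mask |= 1ull << i;

	return (unsigned int)__builtin_popcountll(mask);
}
```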
@@ -255,7 +255,7 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
* Interrupt handler triggered by NIC for handling
* specific event.
*
- * @param: The address of parameter (struct rte_eth_dev *) regsitered before.
+ * @param: The address of parameter (struct rte_eth_dev *) registered before.
*/
static void hinic_dev_interrupt_handler(void *param)
{
@@ -336,7 +336,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
return err;
}
- /* init vlan offoad */
+ /* init vlan offload */
err = hinic_vlan_offload_set(dev,
RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (err) {
@@ -170,7 +170,7 @@ struct tag_tcam_key_mem {
/*
* tunnel packet, mask must be 0xff, spec value is 1;
* normal packet, mask must be 0, spec value is 0;
- * if tunnal packet, ucode use
+	 * if tunnel packet, ucode uses
* sip/dip/protocol/src_port/dst_dport from inner packet
*/
u32 tunnel_flag:8;
@@ -734,7 +734,7 @@ static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
* END
* other members in mask and spec should set to 0x00.
* item->last should be NULL.
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
@@ -1630,7 +1630,7 @@ static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
/**
* Check if the flow rule is supported by nic.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * the HW, because there may not be enough room for the rule.
*/
static int hinic_flow_validate(struct rte_eth_dev *dev,
@@ -1144,7 +1144,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
mbuf_pkt = *tx_pkts++;
queue_info = 0;
- /* 1. parse sge and tx offlod info from mbuf */
+ /* 1. parse sge and tx offload info from mbuf */
if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,
&sqe_info, &off_info))) {
txq->txq_stats.off_errs++;
@@ -466,7 +466,7 @@ hns3_mask_capability(struct hns3_hw *hw,
for (i = 0; i < MAX_CAPS_BIT; i++) {
if (!(caps_masked & BIT_ULL(i)))
continue;
- hns3_info(hw, "mask capabiliy: id-%u, name-%s.",
+ hns3_info(hw, "mask capability: id-%u, name-%s.",
i, hns3_get_caps_name(i));
}
}
@@ -736,7 +736,7 @@ hns3_cmd_init(struct hns3_hw *hw)
return 0;
/*
- * Requiring firmware to enable some features, firber port can still
+	 * Some features require firmware support. The fiber port can still
* work without it, but copper port can't work because the firmware
* fails to take over the PHY.
*/
@@ -603,7 +603,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
- * Set gap limiter/rate limiter/quanity limiter algorithm
+ * Set gap limiter/rate limiter/quantity limiter algorithm
* configuration for interrupt coalesce of queue's interrupt.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
@@ -25,7 +25,7 @@
 * IR(Mbps) = (IR_b * (2 ^ IR_u) * 8) / (Tick * (2 ^ IR_s)) * CLOCK(1000Mbps)
*
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculation successful, negative: fail
*/
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
@@ -36,8 +36,8 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
- 6 * 256, /* Prioriy level */
- 6 * 32, /* Prioriy group level */
+ 6 * 256, /* Priority level */
+ 6 * 32, /* Priority group level */
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
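Reading the formula forward makes the parameters concrete: IR_b is the base rate, IR_u scales it up, IR_s scales it down, and Tick comes from the per-level table above. A sketch that simply evaluates the expression with made-up parameter values (not values taken from the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Forward evaluation of the shaper formula:
 *   IR(Mbps) = (IR_b * 2^IR_u * 8) / (Tick * 2^IR_s) * CLOCK(1000)
 * The inputs below are illustrative only. */
static double shaper_ir_mbps(uint32_t ir_b, uint32_t ir_u,
			     uint32_t ir_s, uint32_t tick)
{
	double num = (double)ir_b * (double)(1u << ir_u) * 8.0;
	double den = (double)tick * (double)(1u << ir_s);

	return num / den * 1000.0;
}

int main(void)
{
	/* e.g. ir_b = 126, ir_u = 0, ir_s = 0, port-level tick 6 * 8 */
	printf("IR = %.1f Mbps\n", shaper_ir_mbps(126, 0, 0, 6 * 8));
	return 0;
}
```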
@@ -1532,7 +1532,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns)
ret = hns3_dcb_schd_setup_hw(hw);
if (ret) {
- hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
+ hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
return ret;
}
@@ -1737,7 +1737,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode)
* hns3_dcb_pfc_enable - Enable priority flow control
* @dev: pointer to ethernet device
*
- * Configures the pfc settings for one porority.
+ * Configures the pfc settings for one priority.
*/
int
hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
@@ -574,7 +574,7 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns,
hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
vcfg->strip_tag1_discard_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
@@ -784,7 +784,7 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns,
vcfg->insert_tag2_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
vcfg->tag_shift_mode_en ? 1 : 0);
@@ -3420,7 +3420,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw,
* hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hw: pointer to struct hns3_hw
* @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculation successful, negative: fail
*/
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
@@ -4550,14 +4550,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw)
}
/*
- * Validity of supported_speed for firber and copper media type can be
+ * Validity of supported_speed for fiber and copper media type can be
* guaranteed by the following policy:
* Copper:
* Although the initialization of the phy in the firmware may not be
- * completed, the firmware can guarantees that the supported_speed is
- * an valid value.
+ * completed, the firmware can guarantee that the supported_speed is
+ * a valid value.
- * Firber:
+ * Fiber:
- * If the version of firmware supports the acitive query way of the
+ * If the version of firmware supports the active query way of the
* HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
* through it. If unsupported, use the SFP's speed as the value of the
* supported_speed.
@@ -5327,7 +5327,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw)
/*
* Flow control auto-negotiation is not supported for fiber and
- * backpalne media type.
+ * backplane media type.
*/
case HNS3_MEDIA_TYPE_FIBER:
case HNS3_MEDIA_TYPE_BACKPLANE:
@@ -6191,7 +6191,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
}
/*
- * FEC mode order defined in hns3 hardware is inconsistend with
+ * FEC mode order defined in hns3 hardware is inconsistent with
* that defined in the ethdev library. So the sequence needs
* to be converted.
*/
@@ -126,7 +126,7 @@ struct hns3_tc_info {
uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */
uint8_t pgid;
uint32_t bw_limit;
- uint8_t up_to_tc_map; /* user priority maping on the TC */
+ uint8_t up_to_tc_map; /* user priority mapping on the TC */
};
struct hns3_dcb_info {
@@ -571,12 +571,12 @@ struct hns3_hw {
/*
* vlan mode.
* value range:
- * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE
+ * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
*
* - HNS3_SW_SHIFT_AND_DISCARD_MODE
* For some versions of hardware network engine, because of the
* hardware limitation, PMD needs to detect the PVID status
- * to work with haredware to implement PVID-related functions.
+ * to work with hardware to implement PVID-related functions.
* For example, driver need discard the stripped PVID tag to ensure
* the PVID will not report to mbuf and shift the inserted VLAN tag
* to avoid port based VLAN covering it.
@@ -724,7 +724,7 @@ enum hns3_mp_req_type {
HNS3_MP_REQ_MAX
};
-/* Pameters for IPC. */
+/* Parameters for IPC. */
struct hns3_mp_param {
enum hns3_mp_req_type type;
int port_id;
@@ -242,7 +242,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
if (ret == -EPERM) {
hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
old_addr);
- hns3_warn(hw, "Has permanet mac addr(%s) for vf",
+ hns3_warn(hw, "Has permanent mac addr(%s) for vf",
mac_str);
} else {
hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
@@ -318,7 +318,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
* 1. The promiscuous/allmulticast mode can be configured successfully
* only based on the trusted VF device. If based on the non trusted
* VF device, configuring promiscuous/allmulticast mode will fail.
- * The hns3 VF device can be confiruged as trusted device by hns3 PF
+ * The hns3 VF device can be configured as trusted device by hns3 PF
* kernel ethdev driver on the host by the following command:
* "ip link set <eth num> vf <vf id> turst on"
* 2. After the promiscuous mode is configured successfully, hns3 VF PMD
@@ -330,7 +330,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
* filter is still effective even in promiscuous mode. If upper
* applications don't call rte_eth_dev_vlan_filter API function to
- * set vlan based on VF device, hns3 VF PMD will can't receive
+ * set vlan based on VF device, hns3 VF PMD can't receive
- * the packets with vlan tag in promiscuoue mode.
+ * the packets with vlan tag in promiscuous mode.
*/
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
@@ -139,7 +139,7 @@ struct hns3_fdir_rule {
uint32_t flags;
uint32_t fd_id; /* APP marked unique value for this rule. */
uint8_t action;
- /* VF id, avaiblable when flags with HNS3_RULE_FLAG_VF_ID. */
+ /* VF id, available when flags with HNS3_RULE_FLAG_VF_ID. */
uint8_t vf_id;
/*
* equal 0 when action is drop.
@@ -338,7 +338,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,
*
* @param actions[in]
* @param rule[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
@@ -369,7 +369,7 @@ hns3_handle_actions(struct rte_eth_dev *dev,
* Queue region is implemented by FDIR + RSS in hns3 hardware,
* the FDIR's action is one queue region (start_queue_id and
* queue_num), then RSS spread packets to the queue region by
- * RSS algorigthm.
+ * RSS algorithm.
*/
case RTE_FLOW_ACTION_TYPE_RSS:
ret = hns3_handle_action_queue_region(dev, actions,
@@ -940,7 +940,7 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
- "Ver/protocal is not supported in NVGRE");
+ "Ver/protocol is not supported in NVGRE");
/* TNI must be totally masked or not. */
if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
@@ -985,7 +985,7 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
- "Ver/protocal is not supported in GENEVE");
+ "Ver/protocol is not supported in GENEVE");
/* VNI must be totally masked or not. */
if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
@@ -1309,7 +1309,7 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw,
}
/*
- * This function is used to parse rss action validatation.
+ * This function is used to validate the rss action.
*/
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
@@ -1682,7 +1682,7 @@ hns3_flow_args_check(const struct rte_flow_attr *attr,
/*
* Check if the flow rule is supported by hns3.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * the HW, because there may not be enough room for the rule.
*/
static int
@@ -78,14 +78,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
- hns3_err(hw, "Don't wait for mbx respone because of "
+ hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
}
if (is_reset_pending(hns)) {
hw->mbx_resp.req_msg_data = 0;
- hns3_err(hw, "Don't wait for mbx respone because of "
+ hns3_err(hw, "Don't wait for mbx response because of "
"reset pending");
return -EIO;
}
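The two early returns above are what distinguish this from a plain timeout loop: a disabled command interface or a pending reset means no response can ever arrive, so waiting out the mailbox time limit would only stall the caller. A condensed sketch of the structure (helper names are placeholders, not the driver's):

```c
#include <stdbool.h>
#include <stdint.h>

#define POLL_STEP_US 100u

/* Sketch: poll for a mailbox response, but abort at once when the
 * command interface is disabled or a reset is pending, since no
 * response can arrive in either state. */
static int wait_mbx_response(uint32_t limit_us,
			     bool (*cmd_disabled)(void),
			     bool (*reset_pending)(void),
			     bool (*resp_ready)(void),
			     void (*delay_us)(uint32_t))
{
	uint32_t waited = 0;

	while (waited < limit_us) {
		if (cmd_disabled())
			return -1; /* the driver returns -EBUSY */
		if (reset_pending())
			return -2; /* the driver returns -EIO */
		if (resp_ready())
			return 0;
		delay_us(POLL_STEP_US);
		waited += POLL_STEP_US;
	}
	return -3; /* timed out */
}
```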
@@ -22,7 +22,7 @@ enum HNS3_MBX_OPCODE {
HNS3_MBX_GET_RETA, /* (VF -> PF) get RETA */
HNS3_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HNS3_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HNS3_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HNS3_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HNS3_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HNS3_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HNS3_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
@@ -41,7 +41,7 @@ struct hns3_rss_tuple_cfg {
struct hns3_rss_conf {
/* RSS parameters :algorithm, flow_types, key, queue */
struct rte_flow_action_rss conf;
- uint8_t hash_algo; /* hash function type definited by hardware */
+ uint8_t hash_algo; /* hash function type defined by hardware */
uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */
struct hns3_rss_tuple_cfg rss_tuple_sets;
uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
@@ -1903,7 +1903,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
* For hns3 VF device, whether it needs to process PVID depends
* on the configuration of PF kernel mode netdevice driver. And the
* related PF configuration is delivered through the mailbox and finally
- * reflectd in port_base_vlan_cfg.
+ * reflected in port_base_vlan_cfg.
*/
if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
@@ -3043,7 +3043,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
* For hns3 VF device, whether it needs to process PVID depends
* on the configuration of PF kernel mode netdev driver. And the
* related PF configuration is delivered through the mailbox and finally
- * reflectd in port_base_vlan_cfg.
+ * reflected in port_base_vlan_cfg.
*/
if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
@@ -3208,7 +3208,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
* in Tx direction based on hns3 network engine. So when the number of
* VLANs in the packets represented by rxm plus the number of VLAN
* offload by hardware such as PVID etc, exceeds two, the packets will
- * be discarded or the original VLAN of the packets will be overwitted
+ * be discarded or the original VLAN of the packets will be overwritten
* by hardware. When the PF PVID is enabled by calling the API function
* named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3
* PF kernel ether driver, the outer VLAN tag will always be the PVID.
@@ -3393,7 +3393,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
/*
* The inner l2 length of mbuf is the sum of outer l4 length,
* tunneling header length and inner l2 length for a tunnel
- * packect. But in hns3 tx descriptor, the tunneling header
+ * packet. But in hns3 tx descriptor, the tunneling header
* length is contained in the field of outer L4 length.
* Therefore, driver need to calculate the outer L4 length and
* inner L2 length.
@@ -3409,7 +3409,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
- * For NVGRE tunnel packect, the outer L4 is empty. So only
+ * For NVGRE tunnel packet, the outer L4 is empty. So only
* fill the NVGRE header length to the outer L4 field.
*/
tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
@@ -3452,7 +3452,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
* mbuf, but for hns3 descriptor, it is contained in the outer L4. So,
* there is a need that switching between them. To avoid multiple
* calculations, the length of the L2 header include the outer and
- * inner, will be filled during the parsing of tunnel packects.
+ * inner, will be filled during the parsing of tunnel packets.
*/
if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
/*
@@ -3632,7 +3632,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
- * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
@@ -3657,7 +3657,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
- * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
@@ -611,7 +611,7 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
/*
* If packet len bigger than mtu when recv with no-scattered algorithm,
- * the first n bd will without FE bit, we need process this sisution.
+ * the first n BDs will be without the FE bit; we need to process this situation.
* Note: we don't need add statistic counter because latest BD which
* with FE bit will mark HNS3_RXD_L2E_B bit.
*/
@@ -630,7 +630,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
/*
- * Read hardware and software in adjacent positions to minumize
+ * Read hardware and software in adjacent positions to minimize
* the timing variance.
*/
rte_stats->ierrors += rxq->err_stats.l2_errors +
@@ -1289,7 +1289,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev,
* A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if size is 0. In this case function will retrieve
- * all avalible statistics.
+ * all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
* @param size
@@ -2483,7 +2483,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
@@ -3555,7 +3555,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
rte_i40e_hw_port_strings[i].name,
@@ -3613,7 +3613,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
@@ -5544,7 +5544,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
- "VSI failed to get TC bandwdith configuration %u",
+ "VSI failed to get TC bandwidth configuration %u",
hw->aq.asq_last_status);
return ret;
}
@@ -6822,7 +6822,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -9719,7 +9719,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
return 0;
}
-/* Check if there exists the ehtertype filter */
+/* Check if the ethertype filter already exists */
struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
@@ -897,7 +897,7 @@ struct i40e_tunnel_filter {
TAILQ_ENTRY(i40e_tunnel_filter) rules;
struct i40e_tunnel_filter_input input;
uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
- uint16_t vf_id; /* VF id, avaiblable when is_to_vf is 1. */
+ uint16_t vf_id; /* VF id, available when is_to_vf is 1. */
uint16_t queue; /* Queue assigned to when match */
};
@@ -966,7 +966,7 @@ struct i40e_tunnel_filter_conf {
uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
uint16_t queue_id; /**< Queue assigned to if match. */
uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */
- uint16_t vf_id; /**< VF id, avaiblable when is_to_vf is 1. */
+ uint16_t vf_id; /**< VF id, available when is_to_vf is 1. */
};
TAILQ_HEAD(i40e_flow_list, rte_flow);
@@ -1100,7 +1100,7 @@ struct i40e_vf_msg_cfg {
/*
* If message statistics from a VF exceed the maximal limitation,
* the PF will ignore any new message from that VF for
- * 'ignor_second' time.
+ * 'ignore_second' time.
*/
uint32_t ignore_second;
};
@@ -1257,7 +1257,7 @@ struct i40e_adapter {
};
/**
- * Strucute to store private data for each VF representor instance
+ * Structure to store private data for each VF representor instance
*/
struct i40e_vf_representor {
uint16_t switch_domain_id;
@@ -1265,7 +1265,7 @@ struct i40e_vf_representor {
uint16_t vf_id;
/**< Virtual Function ID */
struct i40e_adapter *adapter;
- /**< Private data store of assocaiated physical function */
+ /**< Private data store of associated physical function */
struct i40e_eth_stats stats_offset;
/**< Zero-point of VF statistics*/
};
@@ -142,7 +142,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
I40E_QRX_TAIL(rxq->vsi->base_queue);
rte_wmb();
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return err;
@@ -430,7 +430,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
- PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
+ PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
return -EINVAL;
}
}
@@ -438,7 +438,7 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
memset(flex_pit, 0, sizeof(flex_pit));
num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
if (num > I40E_MAX_FLXPLD_FIED) {
- PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
+ PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
return -EINVAL;
}
for (i = 0; i < num; i++) {
@@ -948,7 +948,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
uint8_t pctype = fdir_input->pctype;
struct i40e_customized_pctype *cus_pctype;
- /* raw pcket template - just copy contents of the raw packet */
+ /* raw packet template - just copy contents of the raw packet */
if (fdir_input->flow_ext.pkt_template) {
memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
fdir_input->flow.raw_flow.length);
@@ -1831,7 +1831,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
&check_filter.fdir.input);
if (!node) {
PMD_DRV_LOG(ERR,
- "There's no corresponding flow firector filter!");
+ "There's no corresponding flow director filter!");
return -EINVAL;
}
@@ -3043,7 +3043,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Exceeds maxmial payload limit.");
+ "Exceeds maximal payload limit.");
return -rte_errno;
}
@@ -343,7 +343,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
vf->request_caps = *(uint32_t *)msg;
/* enable all RSS by default,
- * doesn't support hena setting by virtchnnl yet.
+ * doesn't support hena setting by virtchnl yet.
*/
if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
@@ -725,7 +725,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
i40e_pf_config_irq_link_list(vf, map);
} else {
- /* configured queue size excceed limit */
+		/* configured queue size exceeds limit */
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -609,7 +609,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update rx tail register */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
@@ -995,7 +995,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
@@ -1467,7 +1467,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
@@ -1697,7 +1697,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
if (rxq->rx_deferred_start)
- PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "RX queue %u is deferred start",
rx_queue_id);
err = i40e_alloc_rx_queue_mbufs(rxq);
@@ -1706,7 +1706,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
@@ -1771,7 +1771,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
if (txq->tx_deferred_start)
- PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "TX queue %u is deferred start",
tx_queue_id);
/*
@@ -1930,7 +1930,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "Can't use default burst.");
return -EINVAL;
}
- /* check scatterred conflict */
+ /* check scattered conflict */
if (!dev->data->scattered_rx && use_scattered_rx) {
PMD_DRV_LOG(ERR, "Scattered rx is required.");
return -EINVAL;
@@ -2014,7 +2014,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+	/* Allocate the maximum number of RX ring hardware descriptors. */
len = I40E_MAX_RING_DESC;
/**
@@ -2322,7 +2322,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+	/* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
if (tx_conf->tx_rs_thresh > 0)
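The clamp preserves the invariant tx_rs_thresh + tx_free_thresh <= nb_desc: when an aggressive free threshold leaves too little room for the default RS threshold, the RS threshold shrinks to whatever remains. A sketch of just that adjustment (the default constant is an assumption):

```c
#include <stdint.h>

#define DEFAULT_TX_RS_THRESH 32u /* assumed default value */

/* Keep tx_rs_thresh + tx_free_thresh within the ring size by
 * shrinking the RS threshold when the free threshold is large. */
static uint16_t adapt_tx_rs_thresh(uint16_t nb_desc, uint16_t tx_free_thresh)
{
	if (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc)
		return (uint16_t)(nb_desc - tx_free_thresh);
	return DEFAULT_TX_RS_THRESH;
}
```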
@@ -2991,7 +2991,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return 0;
@@ -430,7 +430,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
desc_to_olflags_v(descs, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll((vec_ld(0,
(vector unsigned long *)&staterr)[0]));
nb_pkts_recd += var;
@@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
vreinterpretq_u8_u32(l3_l4e)));
/* then we shift left 1 bit */
l3_l4e = vshlq_n_u32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = vandq_u32(l3_l4e, cksum_mask);
vlan0 = vorrq_u32(vlan0, rss);
@@ -416,7 +416,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq,
I40E_UINT16_BIT - 1));
stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
if (unlikely(stat == 0)) {
nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
} else {
@@ -282,7 +282,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
/* then we shift left 1 bit */
l3_l4e = _mm_slli_epi32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
@@ -297,7 +297,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
descs, rx_pkts);
#endif
- /* OR in ol_flag bits after descriptor speicific extraction */
+ /* OR in ol_flag bits after descriptor specific extraction */
vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
}
@@ -577,7 +577,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_I40E_DESCS_PER_LOOP))
@@ -1427,7 +1427,7 @@ rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
/* Get all TCs' bandwidth. */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (veb->enabled_tc & BIT_ULL(i)) {
- /* For rubust, if bandwidth is 0, use 1 instead. */
+			/* For robustness, if bandwidth is 0, use 1 instead. */
if (veb->bw_info.bw_ets_share_credits[i])
ets_data.tc_bw_share_credits[i] =
veb->bw_info.bw_ets_share_credits[i];
@@ -516,7 +516,7 @@ iavf_init_rss(struct iavf_adapter *adapter)
j = 0;
vf->rss_lut[i] = j;
}
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure rss*/
ret = iavf_configure_rss_lut(adapter);
if (ret)
return ret;
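The wrap-around loop above amounts to a modulo fill: LUT entry i points at queue i % nb_queues, spreading hash buckets evenly over the configured queues. A standalone sketch:

```c
#include <stdint.h>

/* Round-robin fill of an RSS lookup table: entry i maps to queue
 * i % nb_queues, matching the wrap-around loop above. */
static void rss_lut_fill(uint8_t *lut, uint16_t lut_size, uint16_t nb_queues)
{
	uint16_t i, j = 0;

	for (i = 0; i < lut_size; i++) {
		if (j >= nb_queues)
			j = 0;
		lut[i] = (uint8_t)j++;
	}
}
```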
@@ -831,7 +831,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
"vector %u are mapping to all Rx queues",
vf->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
vf->nb_msix =
@@ -1420,7 +1420,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
}
rte_memcpy(vf->rss_lut, lut, reta_size);
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure rss*/
ret = iavf_configure_rss_lut(adapter);
if (ret) /* revert back */
rte_memcpy(vf->rss_lut, lut, reta_size);
@@ -69,7 +69,7 @@ struct iavf_security_session {
* 16B - 3
*
* but we also need the IV Length for TSO to correctly calculate the total
- * header length so placing it in the upper 6-bits here for easier reterival.
+ * header length so placing it in the upper 6-bits here for easier retrieval.
*/
static inline uint8_t
calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
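Keeping the IV-length code in the upper six bits leaves the low bits of the byte free for other descriptor flags, and recovering it later is a single shift. A sketch of the packing and retrieval; the exact field layout and code values here are assumptions for illustration, not the driver's definitions:

```c
#include <stdint.h>

/* Sketch: store a 6-bit IV-length code in the upper bits of an
 * 8-bit field (layout assumed), keeping the low 2 bits for flags. */
static inline uint8_t pack_iv_len_field(uint8_t iv_code, uint8_t low_flags)
{
	return (uint8_t)((iv_code << 2) | (low_flags & 0x3));
}

static inline uint8_t unpack_iv_len_field(uint8_t field)
{
	return field >> 2; /* recover the IV-length code */
}
```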
@@ -448,7 +448,7 @@ sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
/**
* Send SA add virtual channel request to Inline IPsec driver.
*
- * Inline IPsec driver expects SPI and destination IP adderss to be in host
+ * Inline IPsec driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
@@ -726,7 +726,7 @@ iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
/**
* Send virtual channel security policy add request to IES driver.
*
- * IES driver expects SPI and destination IP adderss to be in host
+ * IES driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
@@ -994,7 +994,7 @@ iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
request->req_id = (uint16_t)0xDEADBEEF;
/**
- * SA delete supports deletetion of 1-8 specified SA's or if the flag
+ * SA delete supports deletion of 1-8 specified SAs or, if the flag
* field is zero, all SA's associated with VF will be deleted.
*/
if (sess) {
@@ -1147,7 +1147,7 @@ iavf_ipsec_crypto_pkt_metadata_set(void *device,
md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
struct iavf_ipsec_crypto_pkt_metadata *);
- /* Set immutatable metadata values from session template */
+ /* Set immutable metadata values from session template */
memcpy(md, &iavf_sess->pkt_metadata_template,
sizeof(struct iavf_ipsec_crypto_pkt_metadata));
@@ -1355,7 +1355,7 @@ iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
/**
- * Iterate over each virtchl crypto capability by crypto type and
+ * Iterate over each virtchnl crypto capability by crypto type and
* algorithm.
*/
for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
@@ -1454,7 +1454,7 @@ iavf_ipsec_crypto_capabilities_get(void *device)
/**
* Update the security capabilities struct with the runtime discovered
* crypto capabilities, except for last element of the array which is
- * the null terminatation
+ * the null terminator
*/
for (i = 0; i < ((sizeof(iavf_security_capabilities) /
sizeof(iavf_security_capabilities[0])) - 1); i++) {
@@ -73,7 +73,7 @@ enum iavf_ipsec_iv_len {
};
-/* IPsec Crypto Packet Metaday offload flags */
+/* IPsec Crypto Packet Metadata offload flags */
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN (0x1 << 0)
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN (0x1 << 1)
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS (0x1 << 2)
@@ -648,8 +648,8 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return -ENOMEM;
}
- /* Allocate the maximun number of RX ring hardware descriptor with
- * a liitle more to support bulk allocate.
+ /* Allocate the maximum number of RX ring hardware descriptors with
+ * a little more to support bulk allocation.
*/
len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
@@ -159,7 +159,7 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
/* then we shift left 1 bit */
l3_l4e = _mm_slli_epi32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
@@ -613,7 +613,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
@@ -461,7 +461,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
(vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
- " than (%u.%u) to support Adapative VF",
+ " than (%u.%u) to support Adaptive VF",
VIRTCHNL_VERSION_MAJOR_START,
VIRTCHNL_VERSION_MAJOR_START);
return -1;
@@ -1487,7 +1487,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
- PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
+ PMD_DRV_LOG(ERR, "fail to check flow director rule");
return err;
}
@@ -864,7 +864,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
j = 0;
hw->rss_lut[i] = j;
}
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure rss */
ret = ice_dcf_configure_rss_lut(hw);
if (ret)
return ret;
@@ -203,7 +203,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
"vector %u are mapping to all Rx queues",
hw->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
@@ -1264,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -1627,7 +1627,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
}
/* At the beginning, only TC0. */
- /* What we need here is the maximam number of the TX queues.
+ /* What we need here is the maximum number of the TX queues.
* Currently vsi->nb_qps means it.
* Correct it if any change.
*/
@@ -3576,7 +3576,7 @@ ice_dev_start(struct rte_eth_dev *dev)
goto rx_err;
}
- /* enable Rx interrput and mapping Rx queue to interrupt vector */
+ /* enable Rx interrupt and map Rx queue to interrupt vector */
if (ice_rxq_intr_setup(dev))
return -EIO;
@@ -3603,7 +3603,7 @@ ice_dev_start(struct rte_eth_dev *dev)
ice_dev_set_link_up(dev);
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
ice_link_update(dev, 0);
pf->adapter_stopped = false;
@@ -5395,7 +5395,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
- /* Get individiual stats from ice_hw_port struct */
+ /* Get individual stats from ice_hw_port struct */
for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
xstats[count].value =
*(uint64_t *)((char *)hw_stats +
@@ -5426,7 +5426,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
- /* Get individiual stats from ice_hw_port struct */
+ /* Get individual stats from ice_hw_port struct */
for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
sizeof(xstats_names[count].name));
@@ -1118,7 +1118,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
rxq->proto_xtr = pf->proto_xtr != NULL ?
pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+ /* Allocate the maximum number of RX ring hardware descriptors. */
len = ICE_MAX_RING_DESC;
/**
@@ -1248,7 +1248,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh :
ICE_DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
tx_rs_thresh =
(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
@@ -1714,7 +1714,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update rx tail register */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
@@ -1976,7 +1976,7 @@ ice_recv_scattered_pkts(void *rx_queue,
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
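The comment fixed in this hunk describes a classic ring-buffer rule. As a hedged sketch (illustrative names, not the PMD's code): the tail value written to RDT trails the next descriptor to be processed by one slot, so the tail can never equal the head and falsely signal a full ring.

#include <stdint.h>

/* RDT = last processed descriptor minus one, with wrap-around. */
static inline uint16_t
rdt_value(uint16_t next_to_read, uint16_t nb_desc)
{
	return (uint16_t)((next_to_read == 0) ? (nb_desc - 1)
					      : (next_to_read - 1));
}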
@@ -3117,7 +3117,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq,
ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
@@ -202,7 +202,7 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
__m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6);
__m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask);
flags = _mm_or_si128(l3_l4_flags, l4_outer_flags);
- /* we need to mask out the reduntant bits introduced by RSS or
+ /* we need to mask out the redundant bits introduced by RSS or
* VLAN fields.
*/
flags = _mm_and_si128(flags, cksum_mask);
@@ -566,7 +566,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb0);
ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != ICE_DESCS_PER_LOOP))
@@ -167,7 +167,7 @@ igc_tuple_filter_lookup(const struct igc_adapter *igc,
/* search the filter array */
for (; i < IGC_MAX_NTUPLE_FILTERS; i++) {
if (igc->ntuple_filters[i].hash_val) {
- /* compare the hase value */
+ /* compare the hash value */
if (ntuple->hash_val ==
igc->ntuple_filters[i].hash_val)
/* filter be found, return index */
@@ -2099,7 +2099,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
sw_ring[tx_id].mbuf = NULL;
sw_ring[tx_id].last_id = tx_id;
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
} while (tx_id != tx_next);
@@ -2133,7 +2133,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
* Walk the list and find the next mbuf, if any.
*/
do {
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
if (sw_ring[tx_id].mbuf)
@@ -2068,7 +2068,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
* enum ionic_fw_control_oper - FW control operations
* @IONIC_FW_RESET: Reset firmware
* @IONIC_FW_INSTALL: Install firmware
- * @IONIC_FW_ACTIVATE: Acticate firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
*/
enum ionic_fw_control_oper {
IONIC_FW_RESET = 0,
@@ -2091,7 +2091,7 @@ struct ionic_fw_control_cmd {
};
/**
- * struct ionic_fw_control_comp - Firmware control copletion
+ * struct ionic_fw_control_comp - Firmware control completion
* @status: Status of the command (enum ionic_status_code)
* @comp_index: Index in the descriptor ring for which this is the completion
* @slot: Slot where the firmware was installed
@@ -2878,7 +2878,7 @@ struct ionic_doorbell {
* and @identity->intr_coal_div to convert from
* usecs to device units:
*
- * coal_init = coal_usecs * coal_mutl / coal_div
+ * coal_init = coal_usecs * coal_mult / coal_div
*
* When an interrupt is sent the interrupt
* coalescing timer current value
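Restating the corrected formula as code, under the assumption (named in the comment itself) that coal_mult and coal_div come from the device identity at probe time:

#include <stdint.h>

/* coal_init = coal_usecs * coal_mult / coal_div */
static inline uint32_t
usecs_to_coal_init(uint32_t coal_usecs, uint32_t coal_mult,
		   uint32_t coal_div)
{
	return coal_usecs * coal_mult / coal_div;
}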
@@ -483,7 +483,7 @@ static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
RTE_CACHE_LINE_SIZE,
afu_dev->device.numa_node);
if (!hw) {
- IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
+ IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
retval = -ENOMEM;
return -ENOMEM;
}
@@ -223,7 +223,7 @@ struct ipn3ke_hw_cap {
};
/**
- * Strucute to store private data for each representor instance
+ * Structure to store private data for each representor instance
*/
struct ipn3ke_rpst {
TAILQ_ENTRY(ipn3ke_rpst) next; /**< Next in device list. */
@@ -237,7 +237,7 @@ struct ipn3ke_rpst {
uint16_t i40e_pf_eth_port_id;
struct rte_eth_link ori_linfo;
struct ipn3ke_tm_internals tm;
- /**< Private data store of assocaiated physical function */
+ /**< Private data store of associated physical function */
struct rte_ether_addr mac_addr;
};
@@ -1299,7 +1299,7 @@ int ipn3ke_flow_init(void *dev)
IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
- /* configure rx parse config, settings associatied with VxLAN */
+ /* configure rx parse config, settings associated with VxLAN */
IPN3KE_MASK_WRITE_REG(hw,
IPN3KE_CLF_RX_PARSE_CFG,
0,
@@ -2282,7 +2282,7 @@ ipn3ke_rpst_xstats_get
count++;
}
- /* Get individiual stats from ipn3ke_rpst_hw_port */
+ /* Get individual stats from ipn3ke_rpst_hw_port */
for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) +
ipn3ke_rpst_hw_port_strings[i].offset);
@@ -2290,7 +2290,7 @@ ipn3ke_rpst_xstats_get
count++;
}
- /* Get individiual stats from ipn3ke_rpst_rxq_pri */
+ /* Get individual stats from ipn3ke_rpst_rxq_pri */
for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
xstats[count].value =
@@ -2302,7 +2302,7 @@ ipn3ke_rpst_xstats_get
}
}
- /* Get individiual stats from ipn3ke_rpst_txq_prio */
+ /* Get individual stats from ipn3ke_rpst_txq_prio */
for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
xstats[count].value =
@@ -2340,7 +2340,7 @@ __rte_unused unsigned int limit)
count++;
}
- /* Get individiual stats from ipn3ke_rpst_hw_port */
+ /* Get individual stats from ipn3ke_rpst_hw_port */
for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
@@ -2349,7 +2349,7 @@ __rte_unused unsigned int limit)
count++;
}
- /* Get individiual stats from ipn3ke_rpst_rxq_pri */
+ /* Get individual stats from ipn3ke_rpst_rxq_pri */
for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < 8; prio++) {
snprintf(xstats_names[count].name,
@@ -2361,7 +2361,7 @@ __rte_unused unsigned int limit)
}
}
- /* Get individiual stats from ipn3ke_rpst_txq_prio */
+ /* Get individual stats from ipn3ke_rpst_txq_prio */
for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < 8; prio++) {
snprintf(xstats_names[count].name,
@@ -8,7 +8,7 @@ if is_windows
endif
#
-# Add the experimenatal APIs called from this PMD
+# Add the experimental APIs called from this PMD
# rte_eth_switch_domain_alloc()
# rte_eth_dev_create()
# rte_eth_dev_destroy()
@@ -11,7 +11,7 @@
#define BYPASS_STATUS_OFF_MASK 3
-/* Macros to check for invlaid function pointers. */
+/* Macros to check for invalid function pointers. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
if ((func) == NULL) { \
PMD_DRV_LOG(ERR, "%s:%d function not supported", \
@@ -135,7 +135,7 @@ static s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
* ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
*
* If we send a write we can't be sure it took until we can read back
- * that same register. It can be a problem as some of the feilds may
+ * that same register. It can be a problem as some of the fields may
* for valid reasons change between the time wrote the register and
* we read it again to verify. So this function check everything we
* can check and then assumes it worked.
@@ -189,7 +189,7 @@ static bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
}
/**
- * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
+ * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
*
* @hw: pointer to hardware structure
* @cmd: The control word we are setting.
@@ -2375,7 +2375,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
@@ -2603,7 +2603,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
}
}
- /* confiugre msix for sleep until rx interrupt */
+ /* configure msix for sleep until rx interrupt */
ixgbe_configure_msix(dev);
/* initialize transmission unit */
@@ -2907,7 +2907,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link up is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
@@ -2938,7 +2938,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link down is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
@@ -4603,7 +4603,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -4659,7 +4659,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
@@ -5921,7 +5921,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
- * as IXGBE_VF_MAXMSIVECOTR = 1
+ * as IXGBE_VF_MAXMSIVECTOR = 1
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
rte_intr_vec_list_index_set(intr_handle, q_idx,
@@ -6256,7 +6256,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
* @param
* dev: Pointer to struct rte_eth_dev.
* index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
+ * filter: pointer to the filter that will be added.
* rx_queue: the queue id the filter assigned to.
*
* @return
@@ -6872,7 +6872,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev)
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
return 0;
@@ -68,7 +68,7 @@
#define IXGBE_LPBK_NONE 0x0 /* Default value. Loopback is disabled. */
#define IXGBE_LPBK_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */
/* X540-X550 specific loopback operations */
-#define IXGBE_MII_AUTONEG_ENABLE 0x1000 /* Auto-negociation enable (default = 1) */
+#define IXGBE_MII_AUTONEG_ENABLE 0x1000 /* Auto-negotiation enable (default = 1) */
#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */
@@ -390,7 +390,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
switch (info->mask.tunnel_type_mask) {
case 0:
- /* Mask turnnel type */
+ /* Mask tunnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
break;
case 1:
@@ -135,7 +135,7 @@ const struct rte_flow_action *next_no_void_action(
}
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
@@ -3261,7 +3261,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
/**
* Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
* the HW. Because there can be no enough room for the rule.
*/
static int
@@ -310,7 +310,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
return -1;
}
- /* Disable and clear Rx SPI and key table table entryes*/
+ /* Disable and clear Rx SPI and key table entries */
reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
@@ -242,7 +242,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
/* PFDMA Tx General Switch Control Enables VMDQ loopback */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
/* clear VMDq map to scan rar 127 */
@@ -1954,7 +1954,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
@@ -2303,7 +2303,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
@@ -2666,7 +2666,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
if (tx_conf->tx_rs_thresh > 0)
@@ -4831,7 +4831,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
- PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+ PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
"single allocation) "
"Scattered Rx callback "
"(port=%d).",
@@ -5170,7 +5170,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Setup the Checksum Register.
* Disable Full-Packet Checksum which is mutually exclusive with RSS.
- * Enable IP/L4 checkum computation by hardware if requested to do so.
+ * Enable IP/L4 checksum computation by hardware if requested to do so.
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
@@ -562,7 +562,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
@@ -726,7 +726,7 @@ memif_msg_receive(struct memif_control_channel *cc)
break;
case MEMIF_MSG_TYPE_INIT:
/*
- * This cc does not have an interface asociated with it.
+ * This cc does not have an interface associated with it.
* If suitable interface is found it will be assigned here.
*/
ret = memif_msg_receive_init(cc, &msg);
@@ -1026,7 +1026,7 @@ memif_regions_init(struct rte_eth_dev *dev)
if (ret < 0)
return ret;
} else {
- /* create one memory region contaning rings and buffers */
+ /* create one memory region containing rings and buffers */
ret = memif_region_init_shm(dev, /* has buffers */ 1);
if (ret < 0)
return ret;
@@ -74,7 +74,7 @@ enum mlx4_mp_req_type {
MLX4_MP_REQ_STOP_RXTX,
};
-/* Pameters for IPC. */
+/* Parameters for IPC. */
struct mlx4_mp_param {
enum mlx4_mp_req_type type;
int port_id;
@@ -752,7 +752,7 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
* Pointer to Ethernet device structure.
*
* @return
- * alwasy 0 on success
+ * always 0 on success
*/
int
mlx4_stats_reset(struct rte_eth_dev *dev)
@@ -112,7 +112,7 @@ static struct mlx5_indexed_pool_config icfg[] = {
* Pointer to RQ channel object, which includes the channel fd
*
* @param[out] fd
- * The file descriptor (representing the intetrrupt) used in this channel.
+ * The file descriptor (representing the interrupt) used in this channel.
*
* @return
* 0 on successfully setting the fd to non-blocking, non-zero otherwise.
@@ -1743,7 +1743,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
if (!priv->drop_queue.hrxq)
goto error;
- /* Port representor shares the same max prioirity with pf port. */
+ /* Port representor shares the same max priority with pf port. */
if (!priv->sh->flow_priority_check_flag) {
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
@@ -2300,7 +2300,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
/*
* Force standalone bonding
* device for ROCE LAG
- * confgiurations.
+ * configurations.
*/
list[ns].info.master = 0;
list[ns].info.representor = 0;
@@ -2637,7 +2637,7 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev)
}
if (ret) {
DRV_LOG(ERR, "Probe of PCI device " PCI_PRI_FMT " "
- "aborted due to proding failure of PF %u",
+ "aborted due to prodding failure of PF %u",
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function,
eth_da.ports[p]);
@@ -1642,7 +1642,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
- * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing
+ * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
* ifindex if Netlink fails.
*/
mlx5_free_shared_dev_ctx(priv->sh);
@@ -1962,7 +1962,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
if (tmp != MLX5_RCM_NONE &&
tmp != MLX5_RCM_LIGHT &&
tmp != MLX5_RCM_AGGR) {
- DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
+ DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -2177,17 +2177,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)
break;
}
if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
- DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
sh->dv_mark_mask, mark);
else
sh->dv_mark_mask = mark;
if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
- DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
sh->dv_meta_mask, meta);
else
sh->dv_meta_mask = meta;
if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
- DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
sh->dv_meta_mask, reg_c0);
else
sh->dv_regc0_mask = reg_c0;
@@ -977,7 +977,7 @@ struct mlx5_flow_id_pool {
uint32_t base_index;
/**< The next index that can be used without any free elements. */
uint32_t *curr; /**< Pointer to the index to pop. */
- uint32_t *last; /**< Pointer to the last element in the empty arrray. */
+ uint32_t *last; /**< Pointer to the last element in the empty array. */
uint32_t max_id; /**< Maximum id can be allocated from the pool. */
};
@@ -1014,7 +1014,7 @@ struct mlx5_dev_txpp {
void *pp; /* Packet pacing context. */
uint16_t pp_id; /* Packet pacing context index. */
uint16_t ts_n; /* Number of captured timestamps. */
- uint16_t ts_p; /* Pointer to statisticks timestamp. */
+ uint16_t ts_p; /* Pointer to statistics timestamp. */
struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
@@ -1118,7 +1118,7 @@ struct mlx5_flex_parser_devx {
uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
};
-/* Pattern field dscriptor - how to translate flex pattern into samples. */
+/* Pattern field descriptor - how to translate flex pattern into samples. */
__extension__
struct mlx5_flex_pattern_field {
uint16_t width:6;
@@ -1169,7 +1169,7 @@ struct mlx5_dev_ctx_shared {
/* Shared DV/DR flow data section. */
uint32_t dv_meta_mask; /* flow META metadata supported mask. */
uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
- uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
+ uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
void *fdb_domain; /* FDB Direct Rules name space handle. */
void *rx_domain; /* RX Direct Rules name space handle. */
void *tx_domain; /* TX Direct Rules name space handle. */
@@ -1206,7 +1206,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
* flow.
*
* @param[in] dev
@@ -3008,7 +3008,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Geneve TLV opt length exceeeds the limit (31)");
+ "Geneve TLV opt length exceeds the limit (31)");
/* Check if class type and length masks are full. */
if (full_mask.option_class != mask->option_class ||
full_mask.option_type != mask->option_type ||
@@ -3957,7 +3957,7 @@ find_graph_root(uint32_t rss_level)
* subflow.
*
* @param[in] dev_flow
- * Pointer the created preifx subflow.
+ * Pointer to the created prefix subflow.
*
* @return
* The layers get from prefix subflow.
@@ -4284,7 +4284,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- /* Fill the register fileds in the flow. */
+ /* Fill the register fields in the flow. */
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
@@ -4353,7 +4353,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
/*
* The copy Flows are not included in any list. There
* ones are referenced from other Flows and can not
- * be applied, removed, deleted in ardbitrary order
+ * be applied, removed, deleted in arbitrary order
* by list traversing.
*/
mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
@@ -4810,7 +4810,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags, and pass the
- * Metadate rxq mark flag to suffix flow as well.
+ * Metadata rxq mark flag to suffix flow as well.
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
@@ -5359,7 +5359,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
* @param[out] error
* Perform verbose error reporting if not NULL.
* @param[in] encap_idx
- * The encap action inndex.
+ * The encap action index.
*
* @return
* 0 on success, negative value otherwise
@@ -6884,7 +6884,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
* @param type
* Flow type to be flushed.
* @param active
- * If flushing is called avtively.
+ * If flushing is called actively.
*/
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
@@ -8531,7 +8531,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
@@ -9009,7 +9009,7 @@ mlx5_get_tof(const struct rte_flow_item *item,
}
/**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
*/
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
@@ -598,7 +598,7 @@ struct mlx5_flow_tbl_data_entry {
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
uint32_t external:1;
- uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
uint32_t is_egress:1; /**< Egress table. */
uint32_t is_transfer:1; /**< Transfer table. */
uint32_t dummy:1; /**< DR table. */
@@ -696,8 +696,8 @@ struct mlx5_flow_handle {
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
- uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
- uint32_t mark:1; /**< Metadate rxq mark flag. */
+ uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
+ uint32_t mark:1; /**< Metadata rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
uint32_t flex_item; /**< referenced Flex Item bitmask. */
union {
@@ -2032,7 +2032,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3205,7 +3205,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
@@ -5145,7 +5145,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
@@ -7858,7 +7858,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
* - Explicit decap action is prohibited by the tunnel offload API.
* - Drop action in tunnel steer rule is prohibited by the API.
* - Application cannot use MARK action because it's value can mask
- * tunnel default miss nitification.
+ * tunnel default miss notification.
* - JUMP in tunnel match rule has no support in current PMD
* implementation.
* - TAG & META are reserved for future uses.
@@ -9184,7 +9184,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- /* We already have GENVE TLV option obj allocated. */
+ /* We already have GENEVE TLV option obj allocated. */
__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
__ATOMIC_RELAXED);
} else {
@@ -10226,7 +10226,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
* Check flow matching criteria first, subtract misc5/4 length if flow
* doesn't own misc5/4 parameters. In some old rdma-core releases,
* misc5/4 are not supported, and matcher creation failure is expected
- * w/o subtration. If misc5 is provided, misc4 must be counted in since
+ * w/o subtraction. If misc5 is provided, misc4 must be counted in since
* misc5 is right after misc4.
*/
if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
@@ -11425,7 +11425,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx)
goto error;
}
}
- /* create a dest array actioin */
+ /* create a dest array action */
ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
resource->num_of_dest,
@@ -205,7 +205,7 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
* @param dev
* Ethernet device to translate flex item on.
* @param[in, out] matcher
- * Flow matcher to confgiure
+ * Flow matcher to configure
* @param[in, out] key
* Flow matcher value.
* @param[in] item
@@ -457,7 +457,7 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
if (field->offset_shift > 15 || field->offset_shift < 0)
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "header length field shift exceeeds limit");
+ "header length field shift exceeds limit");
node->header_length_field_shift = field->offset_shift;
node->header_length_field_offset = field->offset_base;
}
@@ -251,7 +251,7 @@ mlx5_flow_meter_xir_man_exp_calc(int64_t xir, uint8_t *man, uint8_t *exp)
uint8_t _exp = 0;
uint64_t m, e;
- /* Special case xir == 0 ? both exp and matissa are 0. */
+ /* Special case xir == 0 ? both exp and mantissa are 0. */
if (xir == 0) {
*man = 0;
*exp = 0;
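A hedged sketch of the mantissa/exponent split these comments refer to; the real PRM encoding may bound or bias the fields differently, so this only shows the zero special case and the general shape, value ~ man * 2^exp:

#include <stdint.h>

static void
man_exp_calc(uint64_t value, uint8_t *man, uint8_t *exp)
{
	uint8_t e = 0;

	if (value == 0) {		/* special case: both fields zero */
		*man = 0;
		*exp = 0;
		return;
	}
	while (value > UINT8_MAX) {	/* shift until mantissa fits 8 bits */
		value >>= 1;
		e++;
	}
	*man = (uint8_t)value;
	*exp = e;
}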
@@ -287,7 +287,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)
int _exp;
double _man;
- /* Special case xbs == 0 ? both exp and matissa are 0. */
+ /* Special case xbs == 0 ? both exp and mantissa are 0. */
if (xbs == 0) {
*man = 0;
*exp = 0;
@@ -305,7 +305,7 @@ mlx5_flow_meter_xbs_man_exp_calc(uint64_t xbs, uint8_t *man, uint8_t *exp)
* Fill the prm meter parameter.
*
* @param[in,out] fmp
- * Pointer to meter profie to be converted.
+ * Pointer to meter profile to be converted.
* @param[out] error
* Pointer to the error structure.
*
@@ -1101,7 +1101,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
if (ret)
return ret;
}
- /* Update succeedded modify meter parameters. */
+ /* Update successfully modified meter parameters. */
if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE)
fm->active_state = !!active_state;
}
@@ -1615,7 +1615,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev,
return -rte_mtr_error_set(error, -ret,
RTE_MTR_ERROR_TYPE_MTR_PARAMS,
NULL, "Failed to update meter"
- " parmeters in hardware.");
+ " parameters in hardware.");
}
old_fmp->ref_cnt--;
fmp->ref_cnt++;
@@ -178,7 +178,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
* Pointer to the device structure.
*
* @param rx_queue_id
- * Rx queue identificatior.
+ * Rx queue identifier.
*
* @param mode
* Pointer to the burts mode information.
@@ -2152,7 +2152,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
* Number of queues in the array.
*
* @return
- * 1 if all queues in indirection table match 0 othrwise.
+ * 1 if all queues in indirection table match, 0 otherwise.
*/
static int
mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
@@ -2586,7 +2586,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
if (hrxq->standalone) {
/*
* Replacement of indirection table unsupported for
- * stanalone hrxq objects (used by shared RSS).
+ * standalone hrxq objects (used by shared RSS).
*/
rte_errno = ENOTSUP;
return -rte_errno;
@@ -1230,7 +1230,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
uint32_t mask = rxq->flow_meta_port_mask;
uint32_t metadata;
- /* This code is subject for futher optimization. */
+ /* This code is subject to further optimization. */
metadata = rte_be_to_cpu_32
(cq[pos].flow_table_metadata) & mask;
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
@@ -839,7 +839,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
}
}
if (rxq->dynf_meta) {
- /* This code is subject for futher optimization. */
+ /* This code is subject to further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
@@ -772,7 +772,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
}
}
if (rxq->dynf_meta) {
- /* This code is subject for futher optimization. */
+ /* This code is subject to further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
@@ -728,7 +728,7 @@ mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* Pointer to the device structure.
*
* @param tx_queue_id
- * Tx queue identificatior.
+ * Tx queue identifier.
*
* @param mode
* Pointer to the burts mode information.
@@ -55,7 +55,7 @@ extern int mlx5_logtype;
/*
* For the case which data is linked with sequence increased index, the
- * array table will be more efficiect than hash table once need to serarch
+ * array table will be more efficient than a hash table when one needs to search
* one data entry in large numbers of entries. Since the traditional hash
* tables has fixed table size, when huge numbers of data saved to the hash
* table, it also comes lots of hash conflict.
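The argument in this comment is that sequence-increasing indices make an array table collision-free and O(1). A minimal single-level sketch of the idea (the mlx5 implementation is multi-level, so take this as the concept only, with illustrative names):

#include <stdint.h>
#include <stddef.h>

struct idx_table {
	void	**entries;
	uint32_t  size;
};

/* Direct indexing: no hashing, no collisions. */
static inline void *
idx_table_lookup(const struct idx_table *t, uint32_t idx)
{
	return (idx < t->size) ? t->entries[idx] : NULL;
}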
@@ -400,7 +400,7 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
/*
* set_specific_workspace when current value is NULL
* can happen only once per thread, mark this thread in
- * linked list to be able to release reasorces later on.
+ * linked list to be able to release resources later on.
*/
err = mlx5_add_workspace_to_list(data);
if (err) {
@@ -226,7 +226,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
* Pointer to RQ channel object, which includes the channel fd
*
* @param[out] fd
- * The file descriptor (representing the intetrrupt) used in this channel.
+ * The file descriptor (representing the interrupt) used in this channel.
*
* @return
* 0 on successfully setting the fd to non-blocking, non-zero otherwise.
@@ -247,7 +247,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
(mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {
mru = mbuf_data_size - MRVL_NETA_PKT_OFFS;
mtu = MRVL_NETA_MRU_TO_MTU(mru);
- MVNETA_LOG(WARNING, "MTU too big, max MTU possible limitted by"
+ MVNETA_LOG(WARNING, "MTU too big, max MTU possible limited by"
" current mbuf size: %u. Set MTU to %u, MRU to %u",
mbuf_data_size, mtu, mru);
}
@@ -579,7 +579,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
mtu = MRVL_PP2_MRU_TO_MTU(mru);
- MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
+ MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
"by current mbuf size: %u. Set MTU to %u, MRU to %u",
mbuf_data_size, mtu, mru);
}
@@ -301,7 +301,7 @@ get_entry_values(const char *entry, uint8_t *tab,
}
/**
- * Parse Traffic Class'es mapping configuration.
+ * Parse Traffic Classes mapping configuration.
*
* @param file Config file handle.
* @param port Which port to look for.
@@ -736,7 +736,7 @@ mrvl_get_cfg(const char *key __rte_unused, const char *path, void *extra_args)
/* MRVL_TOK_START_HDR replaces MRVL_TOK_DSA_MODE parameter.
* MRVL_TOK_DSA_MODE will be supported for backward
- * compatibillity.
+ * compatibility.
*/
entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_START_HDR);
@@ -229,7 +229,7 @@ hn_nvs_conn_rxbuf(struct hn_data *hv)
hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
/*
- * Pimary queue's rxbuf_info is not allocated at creation time.
+ * Primary queue's rxbuf_info is not allocated at creation time.
* Now we can allocate it after we figure out the slotcnt.
*/
hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
@@ -578,7 +578,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
rte_iova_t iova;
/*
- * Build an external mbuf that points to recveive area.
+ * Build an external mbuf that points to the receive area.
* Use refcount to handle multiple packets in same
* receive buffer section.
*/
@@ -1031,7 +1031,7 @@ hn_dev_rx_queue_count(void *rx_queue)
* returns:
* - -EINVAL - offset outside of ring
* - RTE_ETH_RX_DESC_AVAIL - no data available yet
- * - RTE_ETH_RX_DESC_DONE - data is waiting in stagin ring
+ * - RTE_ETH_RX_DESC_DONE - data is waiting in staging ring
*/
int hn_dev_rx_queue_status(void *arg, uint16_t offset)
{
@@ -103,7 +103,7 @@ static void hn_remove_delayed(void *args)
struct rte_device *dev = rte_eth_devices[port_id].device;
int ret;
- /* Tell VSP to switch data path to synthentic */
+ /* Tell VSP to switch data path to synthetic */
hn_vf_remove(hv);
PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
@@ -63,7 +63,7 @@
* Wildcard indicating a CPP read or write action
*
* The action used will be either read or write depending on whether a read or
- * write instruction/call is performed on the NFP_CPP_ID. It is recomended that
+ * write instruction/call is performed on the NFP_CPP_ID. It is recommended that
* the RW action is used even if all actions to be performed on a NFP_CPP_ID are
* known to be only reads or writes. Doing so will in many cases save NFP CPP
* internal software resources.
@@ -405,7 +405,7 @@ int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);
* @param chip_family Chip family ID
* @param s A string of format "iX.anything" or "iX"
* @param endptr If non-NULL, *endptr will point to the trailing
- * striong after the ME ID part of the string, which
+ * string after the ME ID part of the string, which
* is either an empty string or the first character
* after the separating period.
* @return The island ID on succes, -1 on error.
@@ -425,7 +425,7 @@ int nfp_idstr2island(int chip_family, const char *s, const char **endptr);
* @param chip_family Chip family ID
* @param s A string of format "meX.anything" or "meX"
* @param endptr If non-NULL, *endptr will point to the trailing
- * striong after the ME ID part of the string, which
+ * string after the ME ID part of the string, which
* is either an empty string or the first character
* after the separating period.
* @return The ME number on succes, -1 on error.
@@ -202,7 +202,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
* @address: start address on CPP target
* @size: size of area
*
- * Allocate and initilizae a CPP area structure, and lock it down so
+ * Allocate and initialize a CPP area structure, and lock it down so
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
@@ -272,7 +272,7 @@ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
* @br_primary: branch id of primary bootloader
* @br_secondary: branch id of secondary bootloader
* @br_nsp: branch id of NSP
- * @primary: version of primarary bootloader
+ * @primary: version of primary bootloader
* @secondary: version id of secondary bootloader
* @nsp: version id of NSP
* @sensor_mask: mask of present sensors available on NIC
@@ -207,7 +207,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
* nfp_resource_release() - Release a NFP Resource handle
* @res: NFP Resource handle
*
- * NOTE: This function implictly unlocks the resource handle
+ * NOTE: This function implicitly unlocks the resource handle
*/
void
nfp_resource_release(struct nfp_resource *res)
@@ -236,7 +236,7 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
* nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
* @rtbl: NFP RTsym table
* @name: Symbol name
- * @error: Poniter to error code (optional)
+ * @error: Pointer to error code (optional)
*
* Lookup a symbol, map, read it and return it's value. Value of the symbol
* will be interpreted as a simple little-endian unsigned value. Symbol can
@@ -983,7 +983,7 @@ ngbe_dev_start(struct rte_eth_dev *dev)
}
}
- /* confiugre MSI-X for sleep until Rx interrupt */
+ /* configure MSI-X for sleep until Rx interrupt */
ngbe_configure_msix(dev);
/* initialize transmission unit */
@@ -2641,7 +2641,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
wr32(hw, NGBE_IVARMISC, tmp);
} else {
/* rx or tx causes */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, NGBE_IVAR(queue >> 1));
tmp &= ~(0xFF << idx);
@@ -2893,7 +2893,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev)
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
wr32(hw, NGBE_TSTIMEINC, 0);
return 0;
@@ -163,7 +163,7 @@ int ngbe_pf_host_configure(struct rte_eth_dev *eth_dev)
wr32(hw, NGBE_PSRCTL, NGBE_PSRCTL_LBENA);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
/* clear VMDq map to scan rar 31 */
@@ -1090,7 +1090,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
/* Verify queue index */
if (qidx >= dev->data->nb_rx_queues) {
- octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
+ octeontx_log_err("QID %d not supported (0 - %d available)\n",
qidx, (dev->data->nb_rx_queues - 1));
return -ENOTSUP;
}
@@ -369,7 +369,7 @@ oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
"rc=%d", rc);
return rc;
}
- /* VFIO vector zero is resereved for misc interrupt so
+ /* VFIO vector zero is reserved for misc interrupt so
* doing required adjustment. (b13bfab4cd)
*/
if (rte_intr_vec_list_index_set(handle, q,
@@ -440,7 +440,7 @@ otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
/* This API returns the raw PTP HI clock value. Since LFs doesn't
* have direct access to PTP registers and it requires mbox msg
* to AF for this value. In fastpath reading this value for every
- * packet (which involes mbox call) becomes very expensive, hence
+ * packet (which involves mbox call) becomes very expensive, hence
* we should be able to derive PTP HI clock value from tsc by
* using freq_mult and clk_delta calculated during configure stage.
*/
@@ -61,7 +61,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
/* Retrieving the default desc values */
cmd[off] = send_mem_desc[6];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
@@ -70,7 +70,7 @@ otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual tx tstamp registered
* address.
*/
send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
@@ -953,7 +953,7 @@ static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev)
struct vlan_entry *entry;
int rc;
- /* VLAN filters can't be set without setting filtern on */
+ /* VLAN filters can't be set without turning filtering on */
rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);
if (rc) {
otx2_err("Failed to reinstall vlan filters");
@@ -104,7 +104,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS(iq_no);
- otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
+ otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
do {
@@ -117,7 +117,7 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
OTX_EP_R_IN_CNTS(iq_no);
- otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
do {
@@ -769,7 +769,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
if (eth_dev == NULL)
return -ENOMEM;
- /* Extract pltform data */
+ /* Extract platform data */
pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
if (!pfe_info) {
PFE_PMD_ERR("pfe missing additional platform data");
@@ -187,7 +187,7 @@ gemac_set_mode(void *base, __rte_unused int mode)
{
u32 val = readl(base + EMAC_RCNTRL_REG);
- /*Remove loopbank*/
+ /*Remove loopback*/
val &= ~EMAC_RCNTRL_LOOP;
/*Enable flow control and MII mode*/
@@ -114,9 +114,9 @@ pfe_hif_init_buffers(struct pfe_hif *hif)
* results, eth id, queue id from PFE block along with data.
* so we have to provide additional memory for each packet to
* HIF rx rings so that PFE block can write its headers.
- * so, we are giving the data pointor to HIF rings whose
+ * so, we are giving the data pointer to HIF rings whose
* calculation is as below:
- * mbuf->data_pointor - Required_header_size
+ * mbuf->data_pointer - Required_header_size
*
* We are utilizing the HEADROOM area to receive the PFE
* block headers. On packet reception, HIF driver will use
@@ -8,7 +8,7 @@
#define HIF_CLIENT_QUEUES_MAX 16
#define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE
/*
- * HIF_TX_DESC_NT value should be always greter than 4,
+ * HIF_TX_DESC_NT value should always be greater than 4,
* Otherwise HIF_TX_POLL_MARK will become zero.
*/
#define HIF_RX_DESC_NT 64
@@ -38,7 +38,7 @@ pfe_hif_shm_clean(struct hif_shm *hif_shm)
* This function should be called before initializing HIF driver.
*
* @param[in] hif_shm Shared memory address location in DDR
- * @rerurn 0 - on succes, <0 on fail to initialize
+ * @return 0 - on success, <0 on failure to initialize
*/
int
pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
@@ -109,9 +109,9 @@ hif_lib_client_release_rx_buffers(struct hif_client_s *client)
for (ii = 0; ii < client->rx_q[qno].size; ii++) {
buf = (void *)desc->data;
if (buf) {
- /* Data pointor to mbuf pointor calculation:
+ /* Data pointer to mbuf pointer calculation:
* "Data - User private data - headroom - mbufsize"
- * Actual data pointor given to HIF BDs was
+ * Actual data pointer given to HIF BDs was
* "mbuf->data_offset - PFE_PKT_HEADER_SZ"
*/
buf = buf + PFE_PKT_HEADER_SZ
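The pointer walk spelled out in this comment can be written as a hedged sketch; buf_to_mbuf is a hypothetical helper, PFE_PKT_HEADER_SZ is the driver's extra HIF header space, and the mbuf header / private data / headroom layout is assumed from the comment:

#include <rte_mbuf.h>

static inline struct rte_mbuf *
buf_to_mbuf(void *buf, struct rte_mempool *mp)
{
	/* undo the extra offset the HIF BDs were given */
	char *data = (char *)buf + PFE_PKT_HEADER_SZ;

	/* data - headroom - private area - mbuf header = mbuf start */
	return (struct rte_mbuf *)(data - RTE_PKTMBUF_HEADROOM -
				   rte_pktmbuf_priv_size(mp) -
				   sizeof(struct rte_mbuf));
}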
@@ -477,7 +477,7 @@ hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
client_id, unsigned int qno,
u32 client_ctrl)
{
- /* Optimize the write since the destinaton may be non-cacheable */
+ /* Optimize the write since the destination may be non-cacheable */
if (!((unsigned long)pkt_hdr & 0x3)) {
((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
client_id;
@@ -5983,7 +5983,7 @@ static char *qed_get_buf_ptr(void *buf, u32 offset)
/* Reads a param from the specified buffer. Returns the number of dwords read.
* If the returned str_param is NULL, the param is numeric and its value is
* returned in num_param.
- * Otheriwise, the param is a string and its pointer is returned in str_param.
+ * Otherwise, the param is a string and its pointer is returned in str_param.
*/
static u32 qed_read_param(u32 *dump_buf,
const char **param_name,
@@ -7558,7 +7558,7 @@ static enum dbg_status format_feature(struct ecore_hwfn *p_hwfn,
text_buf[i] = '\n';
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
OSAL_VFREE(p_hwfn, feature->dump_buf);
@@ -2338,7 +2338,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- /* cache align the mbuf size to simplfy rx_buf_size
+ /* cache align the mbuf size to simplify rx_buf_size
* calculation
*/
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
@@ -90,7 +90,7 @@ static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
* (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
* 3) In regular mode - minimum rx_buf_size should be
* (MTU + Maximum L2 Header Size + 2)
- * In above cases +2 corrosponds to 2 bytes padding in front of L2
+ * In above cases +2 corresponds to 2 bytes padding in front of L2
* header.
* 4) rx_buf_size should be cacheline-size aligned. So considering
* criteria 1, we need to adjust the size to floor instead of ceil,
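For the scattered-Rx case the rules above reduce to a short computation. A hedged sketch reusing the macro names the hunks quote (not the driver's exact function):

#include <stdint.h>

static inline uint16_t
calc_scattered_rx_buf_size(uint16_t mtu, uint16_t max_l2_hdr_size)
{
	/* rule 2: split MTU + L2 header + 2B padding across buffers */
	uint16_t sz = (mtu + max_l2_hdr_size + 2) / ETH_RX_MAX_BUFF_PER_PKT;

	/* rule 4: floor-align to the cache line, keeping criterion 1 */
	return QEDE_FLOOR_TO_CACHE_LINE_SIZE(sz);
}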
@@ -106,7 +106,7 @@ qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
if (dev->data->scattered_rx) {
/* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of
- * bufferes can be used for single packet. So need to make sure
+ * buffers can be used for a single packet. So we need to make sure
* mbuf size is sufficient enough for this.
*/
if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
@@ -247,7 +247,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
- /* cache align the mbuf size to simplfy rx_buf_size calculation */
+ /* cache align the mbuf size to simplify rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
(max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
@@ -1745,7 +1745,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
}
- /* Request number of bufferes to be allocated in next loop */
+ /* Request number of buffers to be allocated in next loop */
rxq->rx_alloc_count = rx_alloc_count;
rxq->rcv_pkts += rx_pkt;
@@ -2042,7 +2042,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
}
- /* Request number of bufferes to be allocated in next loop */
+ /* Request number of buffers to be allocated in next loop */
rxq->rx_alloc_count = rx_alloc_count;
rxq->rcv_pkts += rx_pkt;
@@ -2506,7 +2506,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Inner L2 header size in two byte words */
inner_l2_hdr_size = (mbuf->l2_len -
MPLSINUDP_HDR_SIZE) / 2;
- /* Inner L4 header offset from the beggining
+ /* Inner L4 header offset from the beginning
* of inner packet in two byte words
*/
inner_l4_hdr_offset = (mbuf->l2_len -
@@ -225,7 +225,7 @@ struct qede_fastpath {
struct qede_tx_queue *txq;
};
-/* This structure holds the inforation of fast path queues
+/* This structure holds the information of fast path queues
* belonging to individual engines in CMT mode.
*/
struct qede_fastpath_cmt {
@@ -371,7 +371,7 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
/*
* Limits are strict since take into account initial estimation.
- * Resource allocation stategy is described in
+ * Resource allocation strategy is described in
* sfc_estimate_resource_limits().
*/
lim.edl_min_evq_count = lim.edl_max_evq_count =
@@ -68,7 +68,7 @@ sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
{
if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
SFC_GENERIC_LOG(ERR,
- "sfc %s dapapath '%s' already registered",
+ "sfc %s datapath '%s' already registered",
entry->type == SFC_DP_RX ? "Rx" :
entry->type == SFC_DP_TX ? "Tx" :
"unknown",
@@ -158,7 +158,7 @@ typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
struct sfc_dp_rxq **dp_rxqp);
/**
- * Free resources allocated for datapath recevie queue.
+ * Free resources allocated for datapath receive queue.
*/
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
@@ -191,7 +191,7 @@ typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
/**
* Receive queue purge function called after queue flush.
*
- * Should be used to free unused recevie buffers.
+ * Should be used to free unused receive buffers.
*/
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
@@ -19,7 +19,7 @@ extern "C" {
*
* @param evq_prime Global address of the prime register
* @param evq_hw_index Event queue index
- * @param evq_read_ptr Masked event qeueu read pointer
+ * @param evq_read_ptr Masked event queue read pointer
*/
static inline void
sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index,
@@ -851,7 +851,7 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
unsup_rx_prefix_fields =
efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);
- /* LENGTH and CLASS filds must always be present */
+ /* LENGTH and CLASS fields must always be present */
if ((unsup_rx_prefix_fields &
((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
(1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
@@ -630,7 +630,7 @@ sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
rxq->block_size, rxq->buf_stride);
sfc_ef10_essb_rx_info(&rxq->dp.dpq,
"max fill level is %u descs (%u bufs), "
- "refill threashold %u descs (%u bufs)",
+ "refill threshold %u descs (%u bufs)",
rxq->max_fill_level,
rxq->max_fill_level * rxq->block_size,
rxq->refill_threshold,
@@ -40,7 +40,7 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
(1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
(1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
- /* Zero packet type is used as a marker to dicard bad packets */
+ /* Zero packet type is used as a marker to discard bad packets */
goto done;
}
@@ -8,7 +8,7 @@
*/
/*
- * At the momemt of writing DPDK v16.07 has notion of two types of
+ * At the moment of writing DPDK v16.07 has a notion of two types of
* interrupts: LSC (link status change) and RXQ (receive indication).
* It allows to register interrupt callback for entire device which is
* not intended to be used for receive indication (i.e. link status
@@ -1057,7 +1057,7 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
/* Make sure that end padding does not write beyond the buffer */
if (buf_aligned < nic_align_end) {
/*
- * Estimate space which can be lost. If guarnteed buffer
+ * Estimate space which can be lost. If guaranteed buffer
* size is odd, lost space is (nic_align_end - 1). More
* accurate formula is below.
*/
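The estimate generalises: if the buffer end is only guaranteed aligned to buf_aligned, up to nic_align_end - buf_aligned bytes past the last NIC boundary can be lost, and buf_aligned == 1 recovers the odd-size (nic_align_end - 1) case. A hedged sketch, assuming both arguments are powers of two:

```c
#include <stdint.h>

/* Worst-case bytes lost to NIC end padding; buf_aligned and
 * nic_align_end are assumed to be powers of two. */
static uint32_t lost_to_end_padding(uint32_t buf_aligned, uint32_t nic_align_end)
{
    if (buf_aligned >= nic_align_end)
        return 0;   /* end padding always lands on a NIC boundary */
    /* The buffer end is only a multiple of buf_aligned, so up to
     * (nic_align_end - buf_aligned) bytes trail the last boundary;
     * buf_aligned == 1 reproduces the (nic_align_end - 1) case. */
    return nic_align_end - buf_aligned;
}
```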
@@ -1702,7 +1702,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
/*
* Finalize only ethdev queues since other ones are finalized only
- * on device close and they may require additional deinitializaton.
+ * on device close and they may require additional deinitialization.
*/
ethdev_qid = sas->ethdev_rxq_count;
while (--ethdev_qid >= (int)nb_rx_queues) {
@@ -1775,7 +1775,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
reconfigure = true;
- /* Do not ununitialize reserved queues */
+ /* Do not uninitialize reserved queues */
if (nb_rx_queues < sas->ethdev_rxq_count)
sfc_rx_fini_queues(sa, nb_rx_queues);
@@ -356,7 +356,7 @@ sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
/*
* Finalize only ethdev queues since other ones are finalized only
- * on device close and they may require additional deinitializaton.
+ * on device close and they may require additional deinitialization.
*/
ethdev_qid = sas->ethdev_txq_count;
while (--ethdev_qid >= (int)nb_tx_queues) {
@@ -930,7 +930,7 @@ flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
* Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
* respectively.
* They are located within a larger buffer at offsets *toffset* and *foffset*
- * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
+ * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
* buffer.
* Question: are the two masks equivalent?
*
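The question has a direct brute-force answer: project both masks into the coordinate space of the larger buffer, treating bytes outside each window as zero, and compare. A minimal sketch; the function name and the explicit bufsize parameter are illustrative, not the softnic implementation:

```c
#include <stdint.h>
#include <stddef.h>

static int masks_equivalent(const uint8_t *tmask, size_t toffset, size_t tsize,
                            const uint8_t *fmask, size_t foffset, size_t fsize,
                            size_t bufsize)
{
    for (size_t i = 0; i < bufsize; i++) {
        /* A byte outside a mask's window selects nothing (zero). */
        uint8_t t = (i >= toffset && i < toffset + tsize) ?
                    tmask[i - toffset] : 0;
        uint8_t f = (i >= foffset && i < foffset + fsize) ?
                    fmask[i - foffset] : 0;
        if (t != f)
            return 0;   /* masks select different bits */
    }
    return 1;           /* equivalent over the whole buffer */
}
```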
@@ -525,7 +525,7 @@ tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
}
}
-/* Accumaulate L4 raw checksums */
+/* Accumulate L4 raw checksums */
static void
tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
uint32_t *l4_raw_cksum)
@@ -96,7 +96,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
* Load BPF instructions to kernel
*
* @param[in] type
- * BPF program type: classifieir or action
+ * BPF program type: classifier or action
*
* @param[in] insns
* Array of BPF instructions (equivalent to BPF instructions)
@@ -104,7 +104,7 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
* @param[in] insns_cnt
* Number of BPF instructions (size of array)
*
- * @param[in] lincense
+ * @param[in] license
* License string that must be acknowledged by the kernel
*
* @return
@@ -961,7 +961,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
}
/**
- * Helper function to send a serie of TC actions to the kernel
+ * Helper function to send a series of TC actions to the kernel
*
* @param[in] flow
* Pointer to rte flow containing the netlink message
@@ -2017,7 +2017,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
break;
/*
- * Subtract offest to restore real key index
+ * Subtract offset to restore real key index
* If a non RSS flow is falsely trying to release map
* entry 0 - the offset subtraction will calculate the real
* map index as an out-of-range value and the release operation
@@ -21,7 +21,7 @@ nicvf_svf_push(struct nicvf *vf)
entry = rte_zmalloc("nicvf", sizeof(*entry), RTE_CACHE_LINE_SIZE);
if (entry == NULL)
- rte_panic("Cannoc allocate memory for svf_entry\n");
+ rte_panic("Cannot allocate memory for svf_entry\n");
entry->vf = vf;
@@ -1678,7 +1678,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
return -ENOMEM;
}
}
- /* confiugre msix for sleep until rx interrupt */
+ /* configure msix for sleep until rx interrupt */
txgbe_configure_msix(dev);
/* initialize transmission unit */
@@ -3682,7 +3682,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
wr32(hw, TXGBE_IVARMISC, tmp);
} else {
/* rx or tx causes */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
tmp &= ~(0xFF << idx);
@@ -4387,7 +4387,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev)
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
wr32(hw, TXGBE_TSTIMEINC, 0);
return 0;
@@ -961,7 +961,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
wr32(hw, TXGBE_VFIVARMISC, tmp);
} else {
/* rx or tx cause */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
tmp &= ~(0xFF << idx);
@@ -997,7 +997,7 @@ txgbevf_configure_msix(struct rte_eth_dev *dev)
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
- * as TXGBE_VF_MAXMSIVECOTR = 1
+ * as TXGBE_VF_MAXMSIVECTOR = 1
*/
txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
rte_intr_vec_list_index_set(intr_handle, q_idx,
@@ -1288,7 +1288,7 @@ txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
/* only one misc vector supported - mailbox */
eicr &= TXGBE_VFICR_MASK;
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
intr->flags |= TXGBE_FLAG_MAILBOX;
/* To avoid compiler warnings set eicr to used. */
@@ -288,7 +288,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
return -1;
}
- /* Disable and clear Rx SPI and key table entryes*/
+ /* Disable and clear Rx SPI and key table entries */
reg_val = TXGBE_IPSRXIDX_WRITE |
TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
wr32(hw, TXGBE_IPSRXSPI, 0);
@@ -236,7 +236,7 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
/* clear VMDq map to scan rar 127 */
@@ -2657,7 +2657,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
hw->has_rx_offload = rx_offload_enabled(hw);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- /* Enable vector (0) for Link State Intrerrupt */
+ /* Enable vector (0) for Link State Interrupt */
if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
@@ -2775,7 +2775,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
}
}
- /* Enable uio/vfio intr/eventfd mapping: althrough we already did that
+ /* Enable uio/vfio intr/eventfd mapping: although we already did that
* in device configure, but it could be unmapped when device is
* stopped.
*/
@@ -235,7 +235,7 @@ legacy_get_isr(struct virtio_hw *hw)
return dst;
}
-/* Enable one vector (0) for Link State Intrerrupt */
+/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
@@ -962,7 +962,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
return -EINVAL;
}
- /* Update mss lengthes in mbuf */
+ /* Update mss lengths in mbuf */
m->tso_segsz = hdr->gso_size;
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
@@ -192,7 +192,7 @@ virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
/*
* load len from desc, store into mbuf pkt_len and data_len
- * len limiated by l6bit buf_len, pkt_len[16:31] can be ignored
+ * len limited by 16bit buf_len, pkt_len[16:31] can be ignored
*/
const __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 | 0x6 << 12;
__m512i values = _mm512_maskz_shuffle_epi32(mask, v_desc, 0xAA);
@@ -13,7 +13,7 @@
/*
* Two types of mbuf to be cleaned:
* 1) mbuf that has been consumed by backend but not used by virtio.
- * 2) mbuf that hasn't been consued by backend.
+ * 2) mbuf that hasn't been consumed by backend.
*/
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
@@ -227,7 +227,7 @@ struct virtio_net_ctrl_rss {
* Control link announce acknowledgement
*
* The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
- * driver has recevied the notification; device would clear the
+ * driver has received the notification; device would clear the
* VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
* this command.
*/
@@ -312,7 +312,7 @@ struct virtqueue {
struct vq_desc_extra vq_descx[0];
};
-/* If multiqueue is provided by host, then we suppport it. */
+/* If multiqueue is provided by host, then we support it. */
#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
@@ -653,7 +653,7 @@ dpdmai_dev_dequeue_multijob_prefetch(
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
@@ -82,7 +82,7 @@ struct qdma_device {
/** total number of hw queues. */
uint16_t num_hw_queues;
/**
- * Maximum number of hw queues to be alocated per core.
+ * Maximum number of hw queues to be allocated per core.
* This is limited by MAX_HW_QUEUE_PER_CORE
*/
uint16_t max_hw_queues_per_core;
@@ -268,7 +268,7 @@ struct dpaa2_dpdmai_dev {
struct fsl_mc_io dpdmai;
/** HW ID for DPDMAI object */
uint32_t dpdmai_id;
- /** Tocken of this device */
+ /** Token of this device */
uint16_t token;
/** Number of queue in this DPDMAI device */
uint8_t num_queues;
@@ -382,7 +382,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,
if (HIGH_WARN(sensor, value) ||
LOW_WARN(sensor, value)) {
- IFPGA_RAWDEV_PMD_INFO("%s reach theshold %d\n",
+ IFPGA_RAWDEV_PMD_INFO("%s reach threshold %d\n",
sensor->name, value);
*gsd_start = true;
break;
@@ -393,7 +393,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev,
if (!strcmp(sensor->name, "12V AUX Voltage")) {
if (value < AUX_VOLTAGE_WARN) {
IFPGA_RAWDEV_PMD_INFO(
- "%s reach theshold %d mV\n",
+ "%s reach threshold %d mV\n",
sensor->name, value);
*gsd_start = true;
break;
@@ -441,12 +441,12 @@ static int set_surprise_link_check_aer(
pos = ifpga_pci_find_ext_capability(fd, RTE_PCI_EXT_CAP_ID_ERR);
if (!pos)
goto end;
- /* save previout ECAP_AER+0x08 */
+ /* save previous ECAP_AER+0x08 */
ret = pread(fd, &data, sizeof(data), pos+0x08);
if (ret == -1)
goto end;
ifpga_rdev->aer_old[0] = data;
- /* save previout ECAP_AER+0x14 */
+ /* save previous ECAP_AER+0x14 */
ret = pread(fd, &data, sizeof(data), pos+0x14);
if (ret == -1)
goto end;
@@ -531,7 +531,7 @@ ifpga_monitor_start_func(void)
ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
IFPGA_RAWDEV_PMD_ERR(
- "Fail to create ifpga nonitor thread");
+ "Fail to create ifpga monitor thread");
return -1;
}
ifpga_monitor_start = 1;
@@ -95,7 +95,7 @@ enum ntb_spad_idx {
* @spad_write: Write val to local/peer spad register.
* @db_read: Read doorbells status.
* @db_clear: Clear local doorbells.
- * @db_set_mask: Set bits in db mask, preventing db interrpts generated
+ * @db_set_mask: Set bits in db mask, preventing db interrupts generated
* for those db bits.
* @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
* @vector_bind: Bind vector source [intr] to msix vector [msix].
@@ -160,7 +160,7 @@ mlx5_vdpa_vhost_mem_regions_prepare(int vid, uint8_t *mode, uint64_t *mem_size,
* The target here is to group all the physical memory regions of the
* virtio device in one indirect mkey.
* For KLM Fixed Buffer Size mode (HW find the translation entry in one
- * read according to the guest phisical address):
+ * read according to the guest physical address):
* All the sub-direct mkeys of it must be in the same size, hence, each
* one of them should be in the GCD size of all the virtio memory
* regions and the holes between them.
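The sizing rule can be made concrete: the fixed sub-mkey size is the GCD of every region length and every inter-region hole. A hedged sketch, where sizes[] and holes[] are illustrative stand-ins for the driver's region layout:

```c
#include <stdint.h>

/* Euclid's algorithm; helper for the sizing rule above. */
static uint64_t gcd64(uint64_t a, uint64_t b)
{
    while (b != 0) {
        uint64_t t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* One fixed entry size that evenly covers all regions and the holes
 * between them; holes[] holes[i] separates region i and region i+1. */
static uint64_t klm_entry_size(const uint64_t *sizes, const uint64_t *holes,
                               unsigned int n_regions)
{
    uint64_t g = sizes[0];

    for (unsigned int i = 1; i < n_regions; i++)
        g = gcd64(g, sizes[i]);
    for (unsigned int i = 0; i + 1 < n_regions; i++)
        if (holes[i] != 0)
            g = gcd64(g, holes[i]);
    return g;
}
```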
@@ -403,7 +403,7 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
if (!(priv->caps.virtio_queue_type & (1 <<
MLX5_VIRTQ_TYPE_PACKED))) {
- DRV_LOG(ERR, "Failed to configur PACKED mode for vdev "
+ DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
"%d - it was not reported by HW/driver"
" capability.", priv->vid);
return -ENOTSUP;
@@ -372,7 +372,7 @@ add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts)
/* Encoder output to Decoder input adapter. The Decoder accepts only soft input
* so each bit of the encoder output must be translated into one byte of LLR. If
* Sub-block Deinterleaver is bypassed, which is the case, the padding bytes
- * must additionally be insterted at the end of each sub-block.
+ * must additionally be inserted at the end of each sub-block.
*/
static inline void
transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,
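The adapter's core step, one LLR byte per encoder bit, looks roughly like the loop below; the MSB-first bit order and the ±127 saturation value are assumptions, not the test app's exact convention:

```c
#include <stdint.h>

/* Expand hard encoder output bits into one soft LLR byte per bit. */
static void bits_to_llr(const uint8_t *in, int8_t *llr, unsigned int nbits)
{
    for (unsigned int i = 0; i < nbits; i++) {
        unsigned int bit = (in[i / 8] >> (7 - (i % 8))) & 1;
        llr[i] = bit ? -127 : 127;  /* assumed: bit 1 -> negative LLR */
    }
}
```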
@@ -230,7 +230,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)
0 /*SOCKET_ID_ANY*/);
if (retval < 0)
rte_exit(EXIT_FAILURE,
- "Faled to create bond port\n");
+ "Failed to create bond port\n");
BOND_PORT = retval;
@@ -405,7 +405,7 @@ static int lcore_main(__rte_unused void *arg1)
struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
- printf("VLAN taged frame, offset:");
+ printf("VLAN tagged frame, offset:");
offset = get_vlan_offset(eth_hdr, &ether_type);
if (offset > 0)
printf("%d\n", offset);
@@ -87,7 +87,7 @@ static uint16_t nb_queues = 1;
/* MAC updating enabled by default. */
static int mac_updating = 1;
-/* hardare copy mode enabled by default. */
+/* hardware copy mode enabled by default. */
static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;
/* size of descriptor ring for hardware copy mode or
@@ -402,7 +402,7 @@ rte_ethtool_net_set_rx_mode(uint16_t port_id)
#endif
}
- /* Enable Rx vlan filter, VF unspport status is discard */
+ /* Enable Rx vlan filter, VF unsupported status is discarded */
ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
if (ret != 0)
return ret;
@@ -189,7 +189,7 @@ int rte_ethtool_get_module_eeprom(uint16_t port_id,
/**
* Retrieve the Ethernet device pause frame configuration according to
- * parameter attributes desribed by ethtool data structure,
+ * parameter attributes described by ethtool data structure,
* ethtool_pauseparam.
*
* @param port_id
@@ -209,7 +209,7 @@ int rte_ethtool_get_pauseparam(uint16_t port_id,
/**
* Setting the Ethernet device pause frame configuration according to
- * parameter attributes desribed by ethtool data structure, ethtool_pauseparam.
+ * parameter attributes described by ethtool data structure, ethtool_pauseparam.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -244,7 +244,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
/*
- * If number of queued packets reached given threahold, then
+ * If the number of queued packets reaches a given threshold, then
* send burst of packets on an output interface.
*/
static inline uint32_t
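The threshold-drain pattern reduces to the sketch below; tx_buf, send_burst_hw and the burst size are illustrative names rather than the example's own helpers:

```c
#include <stdint.h>

#define MAX_PKT_BURST 32

struct mbuf;                            /* opaque packet for this sketch */

struct tx_buf {
    struct mbuf *m_table[MAX_PKT_BURST];
    uint16_t len;
};

uint16_t send_burst_hw(uint16_t port, struct mbuf **pkts, uint16_t n);

/* Queue one packet; drain the table once the threshold is reached. */
static inline uint32_t
queue_and_maybe_send(struct tx_buf *txb, struct mbuf *m, uint16_t port)
{
    txb->m_table[txb->len++] = m;
    if (txb->len < MAX_PKT_BURST)
        return 0;                       /* below threshold: keep queuing */

    uint16_t sent = send_burst_hw(port, txb->m_table, txb->len);
    txb->len = 0;
    return sent;
}
```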
@@ -877,7 +877,7 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
* Plus, each TX queue can hold up to <max_flow_num> packets.
*/
- /* mbufs stored int the gragment table. 8< */
+ /* mbufs stored in the fragment table. 8< */
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ BUF_SIZE - 1) / BUF_SIZE;
@@ -1353,7 +1353,7 @@ eh_display_rx_adapter_conf(struct eventmode_conf *em_conf)
for (i = 0; i < nb_rx_adapter; i++) {
adapter = &(em_conf->rx_adapter[i]);
sprintf(print_buf,
- "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
+ "\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
adapter->adapter_id,
adapter->nb_connections,
adapter->eventdev_id);
@@ -265,7 +265,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
- * - or reassmeble support is requested
+ * - or reassemble support is requested
*/
static int
multi_seg_required(void)
@@ -2050,7 +2050,7 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
ret = rte_hash_add_key_data(map, &key, (void *)i);
if (ret < 0) {
- printf("Faled to insert cdev mapping for (lcore %u, "
+ printf("Failed to insert cdev mapping for (lcore %u, "
"cdev %u, qp %u), errno %d\n",
key.lcore_id, ipsec_ctx->tbl[i].id,
ipsec_ctx->tbl[i].qp, ret);
@@ -2083,7 +2083,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
str = "Inbound";
}
- /* Required cryptodevs with operation chainning */
+ /* Required cryptodevs with operation chaining */
if (!(dev_info->feature_flags &
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
return ret;
@@ -2251,7 +2251,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- /* limit allowed HW offloafs, as user requested */
+ /* limit allowed HW offloads, as user requested */
dev_info.rx_offload_capa &= dev_rx_offload;
dev_info.tx_offload_capa &= dev_tx_offload;
@@ -2298,7 +2298,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
@@ -2306,7 +2306,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
@@ -2317,7 +2317,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
- printf("port %u configurng rx_offloads=0x%" PRIx64
+ printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
local_port_conf.txmode.offloads);
@@ -897,7 +897,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
@@ -1145,7 +1145,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
if (rc4 >= 0) {
if (rc6 >= 0) {
RTE_LOG(ERR, IPSEC,
- "%s: SPI %u used simultaeously by "
+ "%s: SPI %u used simultaneously by "
"IPv4(%d) and IPv6 (%d) SP rules\n",
__func__, spi, rc4, rc6);
return -EINVAL;
@@ -1550,7 +1550,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
}
/*
- * Allocate space and init rte_ipsec_sa strcutures,
+ * Allocate space and init rte_ipsec_sa structures,
* one per session.
*/
static int
@@ -410,7 +410,7 @@ parse_sp4_tokens(char **tokens, uint32_t n_tokens,
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
@@ -515,7 +515,7 @@ parse_sp6_tokens(char **tokens, uint32_t n_tokens,
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
@@ -20,7 +20,7 @@ REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}`
st=$?
REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'`
if [[ $st -ne 0 || -z "${REMOTE_MAC}" ]]; then
- echo "coouldn't retrieve ether addr from ${REMOTE_IFACE}"
+ echo "couldn't retrieve ether addr from ${REMOTE_IFACE}"
exit 127
fi
@@ -40,7 +40,7 @@ DPDK_VARS=""
# by default ipsec-secgw can't deal with multi-segment packets
# make sure our local/remote host wouldn't generate fragmented packets
-# if reassmebly option is not enabled
+# if reassembly option is not enabled
DEF_MTU_LEN=1400
DEF_PING_LEN=1200
@@ -1039,7 +1039,7 @@ main(int argc, char** argv)
pthread_t kni_link_tid;
int pid;
- /* Associate signal_hanlder function with USR signals */
+ /* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
signal(SIGRTMIN, signal_handler);
@@ -157,7 +157,7 @@ main(int argc, char *argv[])
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- /* >8 End of initializion the Environment Abstraction Layer (EAL). */
+ /* >8 End of initialization of the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
@@ -42,7 +42,7 @@ l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
ethdev_count++;
}
- /* Event device configurtion */
+ /* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
/* Enable implicit release */
@@ -40,7 +40,7 @@ l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
ethdev_count++;
}
- /* Event device configurtion */
+ /* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
/* Enable implicit release */
@@ -468,7 +468,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
qconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;
}
- /* Pass target to indicate that this job is happy of time interwal
+ /* Pass target to indicate that this job is happy with the time interval
* in which it was called. */
rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
}
@@ -801,8 +801,8 @@ send_packets(struct rte_mbuf **m, uint32_t *res, int num)
}
/*
- * Parses IPV6 address, exepcts the following format:
- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
+ * Parses IPV6 address, expects the following format:
+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexadecimal digit).
*/
static int
parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
@@ -1959,7 +1959,7 @@ check_all_ports_link_status(uint32_t port_mask)
}
/*
- * build-up default vaues for dest MACs.
+ * build-up default values for dest MACs.
*/
static void
set_default_dest_mac(void)
@@ -433,7 +433,7 @@ signal_exit_now(int sigtype)
}
-/* Freqency scale down timer callback */
+/* Frequency scale down timer callback */
static void
power_timer_cb(__rte_unused struct rte_timer *tim,
__rte_unused void *arg)
@@ -2358,7 +2358,7 @@ update_telemetry(__rte_unused struct rte_timer *tim,
ret = rte_metrics_update_values(RTE_METRICS_GLOBAL, telstats_index,
values, RTE_DIM(values));
if (ret < 0)
- RTE_LOG(WARNING, POWER, "failed to update metrcis\n");
+ RTE_LOG(WARNING, POWER, "failed to update metrics\n");
}
static int
@@ -51,7 +51,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
#endif /* DO_RFC_1812_CHECKS */
/*
- * We group consecutive packets with the same destionation port into one burst.
+ * We group consecutive packets with the same destination port into one burst.
* To avoid extra latency this is done together with some other packet
* processing, but after we made a final decision about packet's destination.
* To do this we maintain:
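Stripped of the SIMD machinery, the grouping amounts to run-length encoding the destination-port array; a scalar sketch with illustrative names:

```c
#include <stdint.h>

/* Record (start, count) runs of equal destination ports so each run
 * can be transmitted as a single burst. Returns the number of bursts. */
static unsigned int
group_by_port(const uint16_t *dst_port, unsigned int n,
              unsigned int start[], unsigned int cnt[])
{
    unsigned int g = 0;

    for (unsigned int i = 0; i < n; ) {
        unsigned int j = i + 1;

        while (j < n && dst_port[j] == dst_port[i])
            j++;
        start[g] = i;
        cnt[g++] = j - i;
        i = j;
    }
    return g;
}
```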
@@ -76,7 +76,7 @@ rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
static const struct {
uint64_t pnum; /* prebuild 4 values for pnum[]. */
- int32_t idx; /* index for new last updated elemnet. */
+ int32_t idx; /* index for new last updated element. */
uint16_t lpv; /* add value to the last updated element. */
} gptbl[GRPSZ] = {
{
@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
@@ -64,7 +64,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
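The 4-at-once comparison described in the two comments above can be sketched with plain SSE2 intrinsics; the lane layout of dp1/dp2 follows the comment, the helper name is illustrative:

```c
#include <stdint.h>
#include <emmintrin.h>  /* SSE2 */

/* dp1 = <a,b,c,d>, dp2 = <b,c,d,e> in 16-bit lanes; returns a 4-bit
 * mask with bit i set when dst_port[i] == dst_port[i + 1]. */
static inline uint32_t
same_port_mask(__m128i dp1, __m128i dp2)
{
    __m128i r = _mm_cmpeq_epi16(dp1, dp2);  /* 0xFFFF per equal lane */

    r = _mm_unpacklo_epi16(r, r);           /* widen 4 lanes to 32 bit */
    return (uint32_t)_mm_movemask_ps(_mm_castsi128_ps(r));
}
```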
@@ -175,7 +175,7 @@ static void cmd_dev_detach_parsed(void *parsed_result,
cmdline_printf(cl, "detached device %s\n",
da.name);
else
- cmdline_printf(cl, "failed to dettach device %s\n",
+ cmdline_printf(cl, "failed to detach device %s\n",
da.name);
rte_devargs_reset(&da);
}
@@ -4,7 +4,7 @@
/*
* This sample application is a simple multi-process application which
- * demostrates sharing of queues and memory pools between processes, and
+ * demonstrates sharing of queues and memory pools between processes, and
* using those queues/pools for communication between the processes.
*
* Application is designed to run with two processes, a primary and a
@@ -3,7 +3,7 @@
*/
/*
- * Sample application demostrating how to do packet I/O in a multi-process
+ * Sample application demonstrating how to do packet I/O in a multi-process
* environment. The same code can be run as a primary process and as a
* secondary process, just with a different proc-id parameter in each case
* (apart from the EAL flag to indicate a secondary process).
@@ -696,7 +696,7 @@ assign_stream_to_lcores(void)
break;
}
- /* Print packet forwading config. */
+ /* Print packet forwarding config. */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
@@ -686,7 +686,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n");
- /* Check if we have enought cores */
+ /* Check if we have enough cores */
if (rte_lcore_count() < 3)
rte_exit(EXIT_FAILURE, "Error, This application needs at "
"least 3 logical cores to run:\n"
@@ -178,7 +178,7 @@ lthread_create(struct lthread **new_lt, int lcore_id,
bzero(lt, sizeof(struct lthread));
lt->root_sched = THIS_SCHED;
- /* set the function args and exit handlder */
+ /* set the function args and exit handler */
_lthread_init(lt, fun, arg, _lthread_exit_handler);
/* put it in the ready queue */
@@ -384,7 +384,7 @@ void lthread_exit(void *ptr)
}
- /* wait until the joinging thread has collected the exit value */
+ /* wait until the joining thread has collected the exit value */
while (lt->join != LT_JOIN_EXIT_VAL_READ)
_reschedule();
@@ -410,7 +410,7 @@ int lthread_join(struct lthread *lt, void **ptr)
/* invalid to join a detached thread, or a thread that is joined */
if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
return POSIX_ERRNO(EINVAL);
- /* pointer to the joining thread and a poingter to return a value */
+ /* pointer to the joining thread and a pointer to return a value */
lt->lt_join = current;
current->lt_exit_ptr = ptr;
/* There is a race between lthread_join() and lthread_exit()
@@ -232,7 +232,7 @@ lthread_sched_stats_display(void)
}
/*
- * Defafult diagnostic callback
+ * Default diagnostic callback
*/
static uint64_t
_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
@@ -107,7 +107,7 @@ enum join_st {
LT_JOIN_EXIT_VAL_READ, /* joining thread has collected ret val */
};
-/* defnition of an lthread stack object */
+/* definition of an lthread stack object */
struct lthread_stack {
uint8_t stack[LTHREAD_MAX_STACK_SIZE];
size_t stack_size;
@@ -215,7 +215,7 @@ void _lthread_tls_alloc(struct lthread *lt)
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
- /* allocate data for TLS varaiables using RTE_PER_LTHREAD macros */
+ /* allocate data for TLS variables using RTE_PER_LTHREAD macros */
if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
lt->per_lthread_data =
_lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
@@ -125,7 +125,7 @@ cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,
}
/*
- * When set to zero, simple forwaring path is eanbled.
+ * When set to zero, simple forwarding path is enabled.
* When set to one, optimized forwarding path is enabled.
* Note that LPM optimisation path uses SSE4.1 instructions.
*/
@@ -1529,7 +1529,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
}
/*
- * We group consecutive packets with the same destionation port into one burst.
+ * We group consecutive packets with the same destination port into one burst.
* To avoid extra latency this is done together with some other packet
* processing, but after we made a final decision about packet's destination.
* To do this we maintain:
@@ -1554,7 +1554,7 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
@@ -1565,7 +1565,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
{
static const struct {
uint64_t pnum; /* prebuild 4 values for pnum[]. */
- int32_t idx; /* index for new last updated elemnet. */
+ int32_t idx; /* index for new last updated element. */
uint16_t lpv; /* add value to the last updated element. */
} gptbl[GRPSZ] = {
{
@@ -1834,7 +1834,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
/*
* Send packets out, through destination port.
- * Consecuteve pacekts with the same destination port
+ * Consecutive packets with the same destination port
* are already grouped together.
* If destination port for the packet equals BAD_PORT,
* then free the packet without sending it out.
@@ -3514,7 +3514,7 @@ main(int argc, char **argv)
ret = rte_timer_subsystem_init();
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to initialize timer subystem\n");
+ rte_exit(EXIT_FAILURE, "Failed to initialize timer subsystem\n");
/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
@@ -41,7 +41,7 @@
*
* The decision whether to invoke the real library function or the lthread
* function is controlled by a per pthread flag that can be switched
- * on of off by the pthread_override_set() API described below. Typcially
+ * on or off by the pthread_override_set() API described below. Typically
* this should be done as the first action of the initial lthread.
*
* N.B In general it would be poor practice to revert to invoke a real
@@ -4,7 +4,7 @@
; This program is setting up two register arrays called "pkt_counters" and "byte_counters".
; On every input packet (Ethernet/IPv4), the "pkt_counters" register at location indexed by
; the IPv4 header "Source Address" field is incremented, while the same location in the
-; "byte_counters" array accummulates the value of the IPv4 header "Total Length" field.
+; "byte_counters" array accumulates the value of the IPv4 header "Total Length" field.
;
; The "regrd" and "regwr" CLI commands can be used to read and write the current value of
; any register array location.
@@ -41,7 +41,7 @@ static void cmd_help_parsed(__rte_unused void *parsed_result,
" qavg port X subport Y pipe Z : Show average queue size per pipe.\n"
" qavg port X subport Y pipe Z tc A : Show average queue size per pipe and TC.\n"
" qavg port X subport Y pipe Z tc A q B : Show average queue size of a specific queue.\n"
- " qavg [n|period] X : Set number of times and peiod (us).\n\n"
+ " qavg [n|period] X : Set number of times and period (us).\n\n"
);
}
@@ -296,7 +296,7 @@ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
}
}
}
-/* >8 End of packets dequeueing. */
+/* >8 End of packets dequeuing. */
/*
* Application main function - loops through
@@ -179,7 +179,7 @@ main(int argc, char *argv[])
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- /* >8 End of initializion the Environment Abstraction Layer (EAL). */
+ /* >8 End of initialization of the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
@@ -107,7 +107,7 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char *socket_files;
static int nb_sockets;
-/* empty vmdq configuration structure. Filled in programatically */
+/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
@@ -115,7 +115,7 @@ static struct rte_eth_conf vmdq_conf_default = {
/*
* VLAN strip is necessary for 1G NIC such as I350,
* this fixes bug of ipv4 forwarding in guest can't
- * forward pakets from one virtio dev to another virtio dev.
+ * forward packets from one virtio dev to another virtio dev.
*/
.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
},
@@ -463,7 +463,7 @@ us_vhost_usage(const char *prgname)
" --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
" --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
- " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
+ " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
" --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
" --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
@@ -1289,7 +1289,7 @@ switch_worker(void *arg __rte_unused)
struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
tx_q = &lcore_tx_queue[lcore_id];
for (i = 0; i < rte_lcore_count(); i++) {
@@ -1333,7 +1333,7 @@ switch_worker(void *arg __rte_unused)
/*
* Remove a device from the specific data core linked list and from the
- * main linked list. Synchonization occurs through the use of the
+ * main linked list. Synchronization occurs through the use of the
* lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
@@ -404,7 +404,7 @@ get_pcpu_to_control(struct policy *pol)
/*
* So now that we're handling virtual and physical cores, we need to
- * differenciate between them when adding them to the branch monitor.
+ * differentiate between them when adding them to the branch monitor.
* Virtual cores need to be converted to physical cores.
*/
if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
@@ -224,7 +224,7 @@ int power_manager_enable_turbo_core(unsigned int core_num);
int power_manager_disable_turbo_core(unsigned int core_num);
/**
- * Get the current freuency of the core specified by core_num
+ * Get the current frequency of the core specified by core_num
*
* @param core_num
* The core number to get the current frequency
@@ -62,7 +62,7 @@ static uint8_t rss_enable;
/* Default structure for VMDq. 8< */
-/* empty vmdq configuration structure. Filled in programatically */
+/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
@@ -41,7 +41,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
}
/**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the fifo. Return the number actually read
*/
static inline uint32_t
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
@@ -885,7 +885,7 @@ acl_gen_range_trie(struct acl_build_context *context,
return root;
}
- /* gather information about divirgent paths */
+ /* gather information about divergent paths */
lo_00 = 0;
hi_ff = UINT8_MAX;
for (k = n - 1; k >= 0; k--) {
@@ -146,7 +146,7 @@ transition4(xmm_t next_input, const uint64_t *trans,
dfa_ofs = vec_sub(t, r);
- /* QUAD/SINGLE caluclations. */
+ /* QUAD/SINGLE calculations. */
t = (xmm_t)vec_cmpgt((vector signed char)in, (vector signed char)tr_hi);
t = (xmm_t)vec_sel(
vec_sel(
@@ -64,7 +64,7 @@ update_flow_mask(const struct acl_flow_avx512 *flow, uint32_t *fmsk,
}
/*
- * Resolve matches for multiple categories (LE 8, use 128b instuctions/regs)
+ * Resolve matches for multiple categories (LE 8, use 128b instructions/regs)
*/
static inline void
resolve_mcle8_avx512x1(uint32_t result[],
@@ -10,7 +10,7 @@
*/
/*
- * This implementation uses 512-bit registers(zmm) and instrincts.
+ * This implementation uses 512-bit registers(zmm) and intrinsics.
* So our main SIMD type is 512-bit width and each such variable can
* process sizeof(__m512i) / sizeof(uint32_t) == 16 entries in parallel.
*/
@@ -25,20 +25,20 @@
#define _F_(x) x##_avx512x16
/*
- * Same instrincts have different syntaxis (depending on the bit-width),
+ * Same intrinsics have different syntax (depending on the bit-width),
* so to overcome that few macros need to be defined.
*/
-/* Naming convention for generic epi(packed integers) type instrincts. */
+/* Naming convention for generic epi(packed integers) type intrinsics. */
#define _M_I_(x) _mm512_##x
-/* Naming convention for si(whole simd integer) type instrincts. */
+/* Naming convention for si(whole simd integer) type intrinsics. */
#define _M_SI_(x) _mm512_##x##_si512
-/* Naming convention for masked gather type instrincts. */
+/* Naming convention for masked gather type intrinsics. */
#define _M_MGI_(x) _mm512_##x
-/* Naming convention for gather type instrincts. */
+/* Naming convention for gather type intrinsics. */
#define _M_GI_(name, idx, base, scale) _mm512_##name(idx, base, scale)
/* num/mask of transitions per SIMD regs */
@@ -239,7 +239,7 @@ _F_(gather_bytes)(__m512i zero, const __m512i p[2], const uint32_t m[2],
}
/*
- * Resolve matches for multiple categories (GT 8, use 512b instuctions/regs)
+ * Resolve matches for multiple categories (GT 8, use 512b instructions/regs)
*/
static inline void
resolve_mcgt8_avx512x1(uint32_t result[],
@@ -10,7 +10,7 @@
*/
/*
- * This implementation uses 256-bit registers(ymm) and instrincts.
+ * This implementation uses 256-bit registers(ymm) and intrinsics.
* So our main SIMD type is 256-bit width and each such variable can
* process sizeof(__m256i) / sizeof(uint32_t) == 8 entries in parallel.
*/
@@ -25,20 +25,20 @@
#define _F_(x) x##_avx512x8
/*
- * Same instrincts have different syntaxis (depending on the bit-width),
+ * Same intrinsics have different syntax (depending on the bit-width),
* so to overcome that few macros need to be defined.
*/
-/* Naming convention for generic epi(packed integers) type instrincts. */
+/* Naming convention for generic epi(packed integers) type intrinsics. */
#define _M_I_(x) _mm256_##x
-/* Naming convention for si(whole simd integer) type instrincts. */
+/* Naming convention for si(whole simd integer) type intrinsics. */
#define _M_SI_(x) _mm256_##x##_si256
-/* Naming convention for masked gather type instrincts. */
+/* Naming convention for masked gather type intrinsics. */
#define _M_MGI_(x) _mm256_m##x
-/* Naming convention for gather type instrincts. */
+/* Naming convention for gather type intrinsics. */
#define _M_GI_(name, idx, base, scale) _mm256_##name(base, idx, scale)
/* num/mask of transitions per SIMD regs */
@@ -412,7 +412,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B:
/* tmp = A */
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
@@ -428,7 +428,7 @@ static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
@@ -533,7 +533,7 @@ struct rte_dma_port_param {
* @note If some fields can not be supported by the
* hardware/driver, then the driver ignores those fields.
* Please check driver-specific documentation for limitations
- * and capablites.
+ * and capabilities.
*/
__extension__
struct {
@@ -731,7 +731,7 @@ enum rte_dma_status_code {
/** The operation completed successfully. */
RTE_DMA_STATUS_SUCCESSFUL,
/** The operation failed to complete due abort by user.
- * This is mainly used when processing dev_stop, user could modidy the
+ * This is mainly used when processing dev_stop, user could modify the
* descriptors (e.g. change one bit to tell hardware abort this job),
* it allows outstanding requests to be complete as much as possible,
* so reduce the time to stop the device.
@@ -30,7 +30,7 @@ extern "C" {
/**
* This call is easily portable to any architecture, however,
- * it may require a system call and inprecise for some tasks.
+ * it may require a system call and be imprecise for some tasks.
*/
static inline uint64_t
__rte_rdtsc_syscall(void)
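A hedged sketch of such a portable fallback, built on POSIX clock_gettime(); it may indeed cost a system call and offers only nanosecond granularity:

```c
#include <stdint.h>
#include <time.h>

/* Portable pseudo-TSC: nanoseconds from a monotonic clock. */
static inline uint64_t
rdtsc_portable(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}
```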
@@ -234,7 +234,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
@@ -288,7 +288,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
@@ -157,7 +157,7 @@ rte_pflock_write_lock(rte_pflock_t *pf)
uint16_t ticket, w;
/* Acquire ownership of write-phase.
- * This is same as rte_tickelock_lock().
+ * This is same as rte_ticketlock_lock().
*/
ticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);
rte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);
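The two lines above are the classic ticket handshake. A standalone sketch of the same idea with GCC atomics; the struct layout and names are illustrative, not the rte_ticketlock definition:

```c
#include <stdint.h>

struct ticketlock {
    uint16_t in;    /* next ticket to hand out */
    uint16_t out;   /* ticket currently being served */
};

/* Take a ticket, then spin until it is being served (FIFO order). */
static inline void ticket_lock(struct ticketlock *l)
{
    uint16_t t = __atomic_fetch_add(&l->in, 1, __ATOMIC_RELAXED);

    while (__atomic_load_n(&l->out, __ATOMIC_ACQUIRE) != t)
        ;   /* spin until our turn */
}

static inline void ticket_unlock(struct ticketlock *l)
{
    __atomic_fetch_add(&l->out, 1, __ATOMIC_RELEASE);
}
```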
@@ -58,7 +58,7 @@ rte_malloc(const char *type, size_t size, unsigned align)
__rte_alloc_size(2);
/**
- * Allocate zero'ed memory from the heap.
+ * Allocate zeroed memory from the heap.
*
* Equivalent to rte_malloc() except that the memory zone is
* initialised with zeros. In NUMA systems, the memory allocated resides on the
@@ -189,7 +189,7 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket)
__rte_alloc_size(2);
/**
- * Allocate zero'ed memory from the heap.
+ * Allocate zeroed memory from the heap.
*
* Equivalent to rte_malloc() except that the memory zone is
* initialised with zeros.
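A usage sketch: since the zone comes back zero-filled, no memset() is needed afterwards (the type string and stats structure are illustrative):

```c
#include <rte_malloc.h>
#include <rte_common.h>

struct flow_stats {
    uint64_t pkts;
    uint64_t bytes;
};

static struct flow_stats *
alloc_stats(unsigned int n)
{
    /* Returns n zeroed, cache-line-aligned entries, or NULL. */
    return rte_zmalloc("flow_stats", n * sizeof(struct flow_stats),
                       RTE_CACHE_LINE_SIZE);
}
```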
@@ -589,7 +589,7 @@ rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next) {
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
@@ -639,7 +639,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
@@ -103,7 +103,7 @@ struct vfio_group {
typedef int (*vfio_dma_func_t)(int);
/* Custom memory region DMA mapping function prototype.
- * Takes VFIO container fd, virtual address, phisical address, length and
+ * Takes VFIO container fd, virtual address, physical address, length and
* operation type (0 to unmap 1 for map) as a parameters.
* Returns 0 on success, -1 on error.
**/
@@ -63,7 +63,7 @@ unsigned int eal_socket_numa_node(unsigned int socket_id);
* @param arg
* Argument to the called function.
* @return
- * 0 on success, netagive error code on failure.
+ * 0 on success, negative error code on failure.
*/
int eal_intr_thread_schedule(void (*func)(void *arg), void *arg);
@@ -440,7 +440,7 @@ opendir(const char *dirname)
* display correctly on console. The problem can be fixed in two ways:
* (1) change the character set of console to 1252 using chcp utility
* and use Lucida Console font, or (2) use _cprintf function when
- * writing to console. The _cprinf() will re-encode ANSI strings to the
+ * writing to console. The _cprintf() will re-encode ANSI strings to the
* console code page so many non-ASCII characters will display correctly.
*/
static struct dirent*
@@ -579,7 +579,7 @@ dirent_mbstowcs_s(
wcstr[n] = 0;
}
- /* Length of resuting multi-byte string WITH zero
+ /* Length of resulting multi-byte string WITH zero
*terminator
*/
if (pReturnValue)
@@ -26,14 +26,14 @@ extern "C" {
#define FNM_PREFIX_DIRS 0x20
/**
- * This function is used for searhing a given string source
+ * This function is used for searching a given string source
* with the given regular expression pattern.
*
* @param pattern
* regular expression notation describing the pattern to match
*
* @param string
- * source string to searcg for the pattern
+ * source string to search for the pattern
*
* @param flag
* containing information about the pattern
@@ -60,7 +60,7 @@ extern "C" {
* Basic idea is to use lock prefixed add with some dummy memory location
* as the destination. From their experiments 128B(2 cache lines) below
* current stack pointer looks like a good candidate.
- * So below we use that techinque for rte_smp_mb() implementation.
+ * So below we use that technique for rte_smp_mb() implementation.
*/
static __rte_always_inline void
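On x86-64 the technique looks like the sketch below: a lock-prefixed add to a dummy slot 128 bytes below the stack pointer acts as a full barrier; the non-x86 fallback builtin is an assumption for portability:

```c
static inline void
smp_mb_lock_add(void)
{
#ifdef __x86_64__
    /* Serialises like mfence, usually cheaper. */
    __asm__ __volatile__("lock addl $0, -128(%%rsp)" ::: "memory", "cc");
#else
    __sync_synchronize();   /* assumed fallback: full barrier builtin */
#endif
}
```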
@@ -3334,7 +3334,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused,
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
rx_queue_id, &queue_conf)) {
@@ -3398,7 +3398,7 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused,
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
rx_queue_id, &q_stats)) {
@@ -3460,7 +3460,7 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
eth_dev_id,
@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib_tailq)
struct rte_fib {
char name[RTE_FIB_NAMESIZE];
enum rte_fib_type type; /**< Type of FIB struct */
- struct rte_rib *rib; /**< RIB helper datastruct */
+ struct rte_rib *rib; /**< RIB helper datastructure */
void *dp; /**< pointer to the dataplane struct*/
rte_fib_lookup_fn_t lookup; /**< fib lookup function */
- rte_fib_modify_fn_t modify; /**< modify fib datastruct */
+ rte_fib_modify_fn_t modify; /**< modify fib datastructure */
uint64_t def_nh;
};
@@ -189,7 +189,7 @@ rte_fib_lookup_bulk(struct rte_fib *fib, uint32_t *ips,
* FIB object handle
* @return
* Pointer on the dataplane struct on success
- * NULL othervise
+ * NULL otherwise
*/
void *
rte_fib_get_dp(struct rte_fib *fib);
@@ -201,7 +201,7 @@ rte_fib_get_dp(struct rte_fib *fib);
* FIB object handle
* @return
* Pointer on the RIB on success
- * NULL othervise
+ * NULL otherwise
*/
struct rte_rib *
rte_fib_get_rib(struct rte_fib *fib);
@@ -40,10 +40,10 @@ EAL_REGISTER_TAILQ(rte_fib6_tailq)
struct rte_fib6 {
char name[FIB6_NAMESIZE];
enum rte_fib6_type type; /**< Type of FIB struct */
- struct rte_rib6 *rib; /**< RIB helper datastruct */
+ struct rte_rib6 *rib; /**< RIB helper datastructure */
void *dp; /**< pointer to the dataplane struct*/
rte_fib6_lookup_fn_t lookup; /**< fib lookup function */
- rte_fib6_modify_fn_t modify; /**< modify fib datastruct */
+ rte_fib6_modify_fn_t modify; /**< modify fib datastructure */
uint64_t def_nh;
};
@@ -184,7 +184,7 @@ rte_fib6_lookup_bulk(struct rte_fib6 *fib,
* FIB6 object handle
* @return
* Pointer on the dataplane struct on success
- * NULL othervise
+ * NULL otherwise
*/
void *
rte_fib6_get_dp(struct rte_fib6 *fib);
@@ -196,7 +196,7 @@ rte_fib6_get_dp(struct rte_fib6 *fib);
* FIB object handle
* @return
* Pointer on the RIB6 on success
- * NULL othervise
+ * NULL otherwise
*/
struct rte_rib6 *
rte_fib6_get_rib(struct rte_fib6 *fib);
@@ -236,7 +236,7 @@ RTE_INIT(rte_ipsec_telemetry_init)
"Return list of IPsec SAs with telemetry enabled.");
rte_telemetry_register_cmd("/ipsec/sa/stats",
handle_telemetry_cmd_ipsec_sa_stats,
- "Returns IPsec SA stastistics. Parameters: int sa_spi");
+ "Returns IPsec SA statistics. Parameters: int sa_spi");
rte_telemetry_register_cmd("/ipsec/sa/details",
handle_telemetry_cmd_ipsec_sa_details,
"Returns IPsec SA configuration. Parameters: int sa_spi");
@@ -153,7 +153,7 @@ rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad);
* @param keys
* Array of keys to be looked up in the SAD
* @param sa
- * Pointer assocoated with the keys.
+ * Pointer associated with the keys.
* If the lookup for the given key failed, then corresponding sa
* will be NULL
* @param n
@@ -362,7 +362,7 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
- /* insert UDP header if UDP encapsulation is inabled */
+ /* insert UDP header if UDP encapsulation is enabled */
if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
&sa->hdr[prm->tun.hdr_len];
@@ -8,7 +8,7 @@
/**
* @file
- * This file contains definion of RTE mbuf structure itself,
+ * This file contains definition of RTE mbuf structure itself,
* packet offload flags and some related macros.
* For majority of DPDK entities, it is not recommended to include
* this file directly, use include <rte_mbuf.h> instead.
@@ -3,7 +3,7 @@
# process all libraries equally, as far as possible
-# "core" libs first, then others alphebetically as far as possible
+# "core" libs first, then others alphabetically as far as possible
# NOTE: for speed of meson runs, the dependencies in the subdirectories
# sometimes skip deps that would be implied by others, e.g. if mempool is
# given as a dep, no need to mention ring. This is especially true for the
@@ -143,7 +143,7 @@ struct rte_l2tpv2_msg_without_length {
/**
* L2TPv2 message Header contains all options except ns_nr(length,
* offset size, offset padding).
- * Ns and Nr MUST be toghter.
+ * Ns and Nr MUST be together.
*/
struct rte_l2tpv2_msg_without_ns_nr {
rte_be16_t length; /**< length(16) */
@@ -155,7 +155,7 @@ struct rte_l2tpv2_msg_without_ns_nr {
/**
* L2TPv2 message Header contains all options except ns_nr(length, ns, nr).
- * offset size and offset padding MUST be toghter.
+ * offset size and offset padding MUST be together.
*/
struct rte_l2tpv2_msg_without_offset {
rte_be16_t length; /**< length(16) */
@@ -369,7 +369,7 @@ struct rte_swx_table_stats {
uint64_t n_pkts_miss;
/** Number of packets (with either lookup hit or miss) per pipeline
- * action. Array of pipeline *n_actions* elements indedex by the
+ * action. Array of pipeline *n_actions* elements indexed by the
* pipeline-level *action_id*, therefore this array has the same size
* for all the tables within the same pipeline.
*/
@@ -629,7 +629,7 @@ struct rte_swx_learner_stats {
uint64_t n_pkts_forget;
/** Number of packets (with either lookup hit or miss) per pipeline action. Array of
- * pipeline *n_actions* elements indedex by the pipeline-level *action_id*, therefore this
+ * pipeline *n_actions* elements indexed by the pipeline-level *action_id*, therefore this
* array has the same size for all the tables within the same pipeline.
*/
uint64_t *n_pkts_action;
@@ -309,7 +309,7 @@ enum instruction_type {
*/
INSTR_ALU_CKADD_FIELD, /* src = H */
INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
- INSTR_ALU_CKADD_STRUCT, /* src = h.hdeader, with any sizeof(header) */
+ INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
/* cksub dst src
* dst = dst '- src
@@ -1562,7 +1562,7 @@ emit_handler(struct thread *t)
return;
}
- /* Header encapsulation (optionally, with prior header decasulation). */
+ /* Header encapsulation (optionally, with prior header decapsulation). */
if ((t->n_headers_out == 2) &&
(h1->ptr + h1->n_bytes == t->ptr) &&
(h0->ptr == h0->ptr0)) {
@@ -2011,7 +2011,7 @@ rte_swx_pipeline_build_from_spec(struct rte_swx_pipeline *p,
if (err_line)
*err_line = 0;
if (err_msg)
- *err_msg = "Null pipeline arument.";
+ *err_msg = "Null pipeline argument.";
status = -EINVAL;
goto error;
}
@@ -621,7 +621,7 @@ power_cppc_enable_turbo(unsigned int lcore_id)
return -1;
}
- /* TODO: must set to max once enbling Turbo? Considering add condition:
+ * TODO: must set to max once enabling Turbo? Consider adding the condition:
* if ((pi->turbo_available) && (pi->curr_idx <= 1))
*/
/* Max may have changed, so call to max function */
@@ -298,14 +298,14 @@ rte_regexdev_get_dev_id(const char *name);
* backtracking positions remembered by any tokens inside the group.
* Example RegEx is `a(?>bc|b)c` if the given patterns are `abc` and `abcc` then
* `a(bc|b)c` matches both where as `a(?>bc|b)c` matches only abcc because
- * atomic groups don't allow backtracing back to `b`.
+ * atomic groups don't allow backtracking back to `b`.
*
* @see struct rte_regexdev_info::regexdev_capa
*/
#define RTE_REGEXDEV_SUPP_PCRE_BACKTRACKING_CTRL_F (1ULL << 3)
/**< RegEx device support PCRE backtracking control verbs.
- * Some examples of backtracing verbs are (*COMMIT), (*ACCEPT), (*FAIL),
+ * Some examples of backtracking verbs are (*COMMIT), (*ACCEPT), (*FAIL),
* (*SKIP), (*PRUNE).
*
* @see struct rte_regexdev_info::regexdev_capa
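
The atomic-group behaviour documented above is easy to verify outside
DPDK with the PCRE2 library itself (link with -lpcre2-8). A minimal
sketch with error handling elided; it should print 1, 0, 1:

#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

static int
matches(const char *pattern, const char *subject)
{
	int errcode;
	PCRE2_SIZE erroffset;
	pcre2_code *re = pcre2_compile((PCRE2_SPTR)pattern,
		PCRE2_ZERO_TERMINATED, 0, &errcode, &erroffset, NULL);
	pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
	int rc = pcre2_match(re, (PCRE2_SPTR)subject, PCRE2_ZERO_TERMINATED,
		0, 0, md, NULL);

	pcre2_match_data_free(md);
	pcre2_code_free(re);
	return rc >= 0;
}

int
main(void)
{
	printf("%d\n", matches("a(bc|b)c", "abc"));    /* 1: can backtrack */
	printf("%d\n", matches("a(?>bc|b)c", "abc"));  /* 0: atomic group */
	printf("%d\n", matches("a(?>bc|b)c", "abcc")); /* 1 */
	return 0;
}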
@@ -1015,7 +1015,7 @@ rte_regexdev_rule_db_update(uint8_t dev_id,
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * Compile local rule set and burn the complied result to the
- * RegEx deive.
+ * Compile local rule set and burn the compiled result to the
+ * RegEx device.
*
* @param dev_id
* RegEx device identifier.
@@ -12,7 +12,7 @@
/**
* @file
- * This file contains definion of RTE ring structure itself,
+ * This file contains definition of RTE ring structure itself,
* init flags and some related macros.
- * For majority of DPDK entities, it is not recommended to include
- * this file directly, use include <rte_ring.h> or <rte_ring_elem.h>
+ * For the majority of DPDK entities, it is not recommended to include
+ * this file directly; use <rte_ring.h> or <rte_ring_elem.h>
@@ -252,7 +252,7 @@ _rte_pie_drop(const struct rte_pie_config *pie_cfg,
}
/**
- * @brief Decides if new packet should be enqeued or dropped for non-empty queue
+ * @brief Decides if a new packet should be enqueued or dropped for a non-empty queue
*
* @param pie_cfg [in] config pointer to a PIE configuration parameter structure
* @param pie [in,out] data pointer to PIE runtime data
@@ -319,7 +319,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,
}
/**
- * @brief Decides if new packet should be enqeued or dropped
+ * @brief Decides if a new packet should be enqueued or dropped
* Updates run time data and gives verdict whether to enqueue or drop the packet.
*
* @param pie_cfg [in] config pointer to a PIE configuration parameter structure
@@ -330,7 +330,7 @@ rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,
*
* @return Operation status
* @retval 0 enqueue the packet
- * @retval 1 drop the packet based on drop probility criteria
+ * @retval 1 drop the packet based on drop probability criteria
*/
static inline int
__rte_experimental
@@ -303,7 +303,7 @@ __rte_red_drop(const struct rte_red_config *red_cfg, struct rte_red *red)
}
/**
- * @brief Decides if new packet should be enqeued or dropped in queue non-empty case
+ * @brief Decides if a new packet should be enqueued or dropped in the non-empty queue case
*
* @param red_cfg [in] config pointer to a RED configuration parameter structure
* @param red [in,out] data pointer to RED runtime data
@@ -361,7 +361,7 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
}
/**
- * @brief Decides if new packet should be enqeued or dropped
+ * @brief Decides if a new packet should be enqueued or dropped
* Updates run time data based on new queue size value.
* Based on new queue average and RED configuration parameters
* gives verdict whether to enqueue or drop the packet.
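
The PIE and RED comments above document the same per-packet contract:
0 means enqueue, non-zero means drop. A hedged sketch of the RED side;
the parameter order of rte_red_config_init() and rte_red_enqueue() is
an assumption from memory, and the timestamp unit is whatever the
enclosing scheduler already uses:

#include <stdint.h>
#include <rte_red.h>

static struct rte_red_config red_cfg;
static struct rte_red red;

static int
red_setup(void)
{
	/* Filter weight 2^-9, min/max thresholds 32/128, 1/max_p = 10. */
	if (rte_red_config_init(&red_cfg, 9, 32, 128, 10) != 0)
		return -1;
	rte_red_rt_data_init(&red);
	return 0;
}

/* Per packet: 0 => enqueue, non-zero => drop. rte_pie_enqueue() follows
 * the same 0/non-zero contract per the @retval lines above. */
static int
red_admit(unsigned int qlen, uint64_t now)
{
	return rte_red_enqueue(&red_cfg, &red, qlen, now);
}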
@@ -239,7 +239,7 @@ struct rte_sched_port {
int socket;
/* Timing */
- uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cyles */
+ uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
uint64_t time; /* Current NIC TX time measured in bytes */
struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
@@ -360,7 +360,7 @@ rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
*
* Hierarchical scheduler subport bandwidth profile add
* Note that this function is safe to use in runtime for adding new
- * subport bandwidth profile as it doesn't have any impact on hiearchical
+ * subport bandwidth profile as it doesn't have any impact on hierarchical
* structure of the scheduler.
* @param port
* Handle to port scheduler instance
@@ -216,7 +216,7 @@ typedef int
* operations into the same table.
*
* The typical reason an implementation may choose to split the table lookup
- * operation into multiple steps is to hide the latency of the inherrent memory
+ * operation into multiple steps is to hide the latency of the inherent memory
* read operations: before a read operation with the source data likely not in
* the CPU cache, the source data prefetch is issued and the table lookup
* operation is postponed in favor of some other unrelated work, which the CPU
@@ -155,7 +155,7 @@ rte_swx_table_selector_group_set(void *table,
* mechanism allows for multiple concurrent select operations into the same table.
*
* The typical reason an implementation may choose to split the operation into multiple steps is to
- * hide the latency of the inherrent memory read operations: before a read operation with the
+ * hide the latency of the inherent memory read operations: before a read operation with the
* source data likely not in the CPU cache, the source data prefetch is issued and the operation is
* postponed in favor of some other unrelated work, which the CPU executes in parallel with the
* source data being fetched into the CPU cache; later on, the operation is resumed, this time with
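
Both comments describe the same prefetch-then-resume idea. A generic,
self-contained sketch of the pattern (deliberately not the actual
rte_swx_table callback signatures):

#include <rte_prefetch.h>

struct lookup_state {
	const void *bucket; /* memory the lookup needs to read */
	const void *entry;  /* result, valid once the lookup completes */
};

/* Step 0 issues the prefetch and returns so the caller can do other,
 * unrelated work while the cache line is being fetched; step 1 performs
 * the actual read, now likely served from L1. Returns non-zero when the
 * lookup is complete. */
static int
lookup_step(struct lookup_state *s, int step)
{
	if (step == 0) {
		rte_prefetch0(s->bucket);
		return 0;
	}
	s->entry = s->bucket;
	return 1;
}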
@@ -534,7 +534,7 @@ telemetry_legacy_init(void)
}
rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket);
if (rc != 0) {
- TMTY_LOG(ERR, "Error with create legcay socket thread: %s\n",
+ TMTY_LOG(ERR, "Error creating legacy socket thread: %s\n",
strerror(rc));
close(v1_socket.sock);
v1_socket.sock = -1;
@@ -23,7 +23,7 @@
/**
* @internal
* Copies a value into a buffer if the buffer has enough available space.
- * Nothing written to buffer if an overflow ocurs.
+ * Nothing is written to the buffer if an overflow occurs.
* This function is not for use for values larger than given buffer length.
*/
__rte_format_printf(3, 4)
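
The contract in that comment (copy the value only if it fits, write
nothing on overflow) can be sketched with plain libc. A hypothetical
helper, not the internal librte_telemetry one:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* Format the value into a scratch buffer first; copy it into buf only
 * if the whole thing fits, otherwise leave buf untouched and return 0. */
static int
copy_if_fits(char *buf, size_t buf_len, const char *fmt, ...)
{
	char tmp[1024];
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(tmp, sizeof(tmp), fmt, ap);
	va_end(ap);

	if (n < 0 || (size_t)n >= buf_len || (size_t)n >= sizeof(tmp))
		return 0;
	memcpy(buf, tmp, (size_t)n + 1);
	return n;
}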
@@ -1115,7 +1115,7 @@ vhost_user_postcopy_region_register(struct virtio_net *dev,
struct uffdio_register reg_struct;
/*
- * Let's register all the mmap'ed area to ensure
+ * Let's register the whole mmapped area to ensure
* alignment on page boundary.
*/
reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
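
For context, such a registration typically completes by filling in the
range length and mode and issuing the UFFDIO_REGISTER ioctl. A hedged,
self-contained sketch; the ioctl and mode flag are from
linux/userfaultfd.h, while the function and its parameters are
hypothetical:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Register one mmapped region with userfaultfd so accesses to missing
 * pages fault and can be served during postcopy live migration. */
static int
postcopy_register_region(int ufd, void *mmap_addr, uint64_t mmap_size)
{
	struct uffdio_register reg_struct;

	reg_struct.range.start = (uint64_t)(uintptr_t)mmap_addr;
	reg_struct.range.len = mmap_size;
	reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

	return ioctl(ufd, UFFDIO_REGISTER, &reg_struct) < 0 ? -1 : 0;
}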
@@ -1177,7 +1177,7 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
msg->fd_num = 0;
send_vhost_reply(main_fd, msg);
- /* Wait for qemu to acknolwedge it's got the addresses
+ /* Wait for qemu to acknowledge it's got the addresses;
* we've got to wait before we're allowed to generate faults.
*/
if (read_vhost_message(main_fd, &ack_msg) <= 0) {