@@ -89,6 +89,10 @@ main(int argc, char **argv)
if (!evdevs)
rte_panic("no eventdev devices found\n");
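+ /* Register the mbuf dynamic field carrying per-event sequence numbers. */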
+ if (rte_event_test_seqn_dynfield_register() < 0)
+ rte_panic("failed to register event test sequence number dynfield\n");
+
/* Populate the default values of the options */
evt_options_default(&opt);
@@ -50,7 +50,7 @@ order_producer(void *arg)
const flow_id_t flow = (uintptr_t)m % nb_flows;
/* Maintain seq number per flow */
- m->seqn = producer_flow_seq[flow]++;
+ *rte_event_test_seqn(m) = producer_flow_seq[flow]++;
flow_id_save(flow, m, &ev);
while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
@@ -89,9 +89,10 @@ order_process_stage_1(struct test_order *const t,
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
- if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+ if (*rte_event_test_seqn(ev->mbuf) != expected_flow_seq[flow]) {
evt_err("flow=%x seqn mismatch got=%x expected=%x",
- flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+ flow, *rte_event_test_seqn(ev->mbuf),
+ expected_flow_seq[flow]);
t->err = true;
rte_smp_wmb();
}
@@ -300,7 +300,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_test_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type, queue, port);
rte_event_enqueue_burst(evdev, port, &ev, 1);
@@ -320,7 +320,8 @@ check_excess_events(uint8_t port)
valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
RTE_TEST_ASSERT_SUCCESS(valid_event,
- "Unexpected valid event=%d", ev.mbuf->seqn);
+ "Unexpected valid event=%d",
+ *rte_event_test_seqn(ev.mbuf));
}
return 0;
}
@@ -425,8 +426,9 @@ static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_test_seqn(ev->mbuf),
+ "index=%d != seqn=%d", index,
+ *rte_event_test_seqn(ev->mbuf));
return 0;
}
@@ -509,10 +511,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(*rte_event_test_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_test_seqn(ev->mbuf), index, expected_val, range,
+ queue_count, MAX_EVENTS);
return 0;
}
@@ -537,7 +539,7 @@ test_multi_queue_priority(void)
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_test_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
@@ -904,7 +906,7 @@ worker_flow_based_pipeline(void *arg)
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
@@ -939,7 +941,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1059,7 +1061,7 @@ worker_group_based_pipeline(void *arg)
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
@@ -1101,7 +1103,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1238,7 +1240,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1360,7 +1362,7 @@ worker_ordered_flow_producer(void *arg)
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_test_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};
@@ -279,7 +279,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_test_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type,
queue, port);
@@ -301,7 +301,7 @@ check_excess_events(uint8_t port)
RTE_TEST_ASSERT_SUCCESS(valid_event,
"Unexpected valid event=%d",
- ev.mbuf->seqn);
+ *rte_event_test_seqn(ev.mbuf));
}
return 0;
}
@@ -406,8 +406,9 @@ static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_test_seqn(ev->mbuf),
+ "index=%d != seqn=%d",
+ index, *rte_event_test_seqn(ev->mbuf));
return 0;
}
@@ -493,10 +494,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(*rte_event_test_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_test_seqn(ev->mbuf), index, expected_val, range,
+ queue_count, MAX_EVENTS);
return 0;
}
@@ -523,7 +524,7 @@ test_multi_queue_priority(void)
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_test_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL,
@@ -888,7 +889,7 @@ worker_flow_based_pipeline(void *arg)
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
@@ -923,7 +924,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
@@ -1043,7 +1044,7 @@ worker_group_based_pipeline(void *arg)
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_test_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
@@ -1084,7 +1085,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
@@ -1222,7 +1223,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
@@ -1348,7 +1349,7 @@ worker_ordered_flow_producer(void *arg)
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_test_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};
@@ -256,7 +256,7 @@ ordered_basic(struct test *t)
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -281,7 +281,7 @@ ordered_basic(struct test *t)
rte_event_dev_dump(evdev, stdout);
return -1;
}
- seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
+ seq = *rte_event_test_seqn(deq_ev[i].mbuf) - MAGIC_SEQN;
if (seq != (i-1)) {
PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
@@ -396,7 +396,7 @@ atomic_basic(struct test *t)
ev.op = RTE_EVENT_OP_NEW;
ev.flow_id = 1;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -625,7 +625,7 @@ single_link_w_stats(struct test *t)
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = 1234 + i;
+ *rte_event_test_seqn(mbufs[i]) = 1234 + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -380,7 +380,7 @@ run_prio_packet_test(struct test *t)
printf("%d: gen of pkt failed\n", __LINE__);
return -1;
}
- arp->seqn = MAGIC_SEQN[i];
+ *rte_event_test_seqn(arp) = MAGIC_SEQN[i];
ev = (struct rte_event){
.priority = PRIORITY[i],
@@ -419,7 +419,7 @@ run_prio_packet_test(struct test *t)
rte_event_dev_dump(evdev, stdout);
return -1;
}
- if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+ if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
printf("%d: first packet out not highest priority\n",
__LINE__);
rte_event_dev_dump(evdev, stdout);
@@ -433,7 +433,7 @@ run_prio_packet_test(struct test *t)
rte_event_dev_dump(evdev, stdout);
return -1;
}
- if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+ if (*rte_event_test_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
printf("%d: second packet out not lower priority\n",
__LINE__);
rte_event_dev_dump(evdev, stdout);
@@ -477,7 +477,7 @@ test_single_directed_packet(struct test *t)
}
const uint32_t MAGIC_SEQN = 4711;
- arp->seqn = MAGIC_SEQN;
+ *rte_event_test_seqn(arp) = MAGIC_SEQN;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
@@ -516,7 +516,7 @@ test_single_directed_packet(struct test *t)
return -1;
}
- if (ev.mbuf->seqn != MAGIC_SEQN) {
+ if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN) {
printf("%d: error magic sequence number not dequeued\n",
__LINE__);
return -1;
@@ -934,7 +934,7 @@ xstats_tests(struct test *t)
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
ev.flow_id = 7;
- arp->seqn = i;
+ *rte_event_test_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
@@ -1485,7 +1485,7 @@ xstats_id_reset_tests(struct test *t)
ev.queue_id = t->qid[i];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
- arp->seqn = i;
+ *rte_event_test_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
@@ -1873,7 +1873,7 @@ qid_priorities(struct test *t)
ev.queue_id = t->qid[i];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
- arp->seqn = i;
+ *rte_event_test_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
@@ -1894,7 +1894,7 @@ qid_priorities(struct test *t)
return -1;
}
for (i = 0; i < 3; i++) {
- if (ev[i].mbuf->seqn != 2-i) {
+ if (*rte_event_test_seqn(ev[i].mbuf) != 2-i) {
printf(
"%d: qid priority test: seqn %d incorrectly prioritized\n",
__LINE__, i);
@@ -2371,7 +2371,7 @@ single_packet(struct test *t)
ev.mbuf = arp;
ev.queue_id = 0;
ev.flow_id = 3;
- arp->seqn = MAGIC_SEQN;
+ *rte_event_test_seqn(arp) = MAGIC_SEQN;
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
if (err != 1) {
@@ -2411,7 +2411,7 @@ single_packet(struct test *t)
}
err = test_event_dev_stats_get(evdev, &stats);
- if (ev.mbuf->seqn != MAGIC_SEQN) {
+ if (*rte_event_test_seqn(ev.mbuf) != MAGIC_SEQN) {
printf("%d: magic sequence number not dequeued\n", __LINE__);
return -1;
}
@@ -2684,7 +2684,7 @@ parallel_basic(struct test *t, int check_order)
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_test_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -2739,10 +2739,12 @@ parallel_basic(struct test *t, int check_order)
/* Check to see if the sequence numbers are in expected order */
if (check_order) {
for (j = 0 ; j < deq_pkts ; j++) {
- if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
- printf(
- "%d: Incorrect sequence number(%d) from port %d\n",
- __LINE__, mbufs_out[j]->seqn, tx_port);
+ if (*rte_event_test_seqn(deq_ev[j].mbuf) !=
+ MAGIC_SEQN + j) {
+ printf("%d: Incorrect sequence number(%d) from port %d\n",
+ __LINE__,
+ *rte_event_test_seqn(mbufs_out[j]),
+ tx_port);
return -1;
}
}
@@ -109,6 +109,23 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
return 0;
}
+#define RTE_EVENT_TEST_SEQN_DYNFIELD_NAME "rte_event_test_seqn_dynfield"
+int rte_event_test_seqn_dynfield_offset = -1;
+
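+/* Register the mbuf dynamic field used by tests to carry sequence numbers. */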
+int
+rte_event_test_seqn_dynfield_register(void)
+{
+ static const struct rte_mbuf_dynfield event_test_seqn_dynfield_desc = {
+ .name = RTE_EVENT_TEST_SEQN_DYNFIELD_NAME,
+ .size = sizeof(rte_event_test_seqn_t),
+ .align = __alignof__(rte_event_test_seqn_t),
+ };
+ rte_event_test_seqn_dynfield_offset =
+ rte_mbuf_dynfield_register(&event_test_seqn_dynfield_desc);
+ return rte_event_test_seqn_dynfield_offset;
+}
+
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
uint32_t *caps)
@@ -1247,8 +1264,12 @@ int rte_event_dev_selftest(uint8_t dev_id)
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- if (dev->dev_ops->dev_selftest != NULL)
+ if (dev->dev_ops->dev_selftest != NULL) {
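+ /* PMD selftests read and write the test sequence number dynamic field. */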
+ if (rte_event_test_seqn_dynfield_register() < 0)
+ return -ENOMEM;
return (*dev->dev_ops->dev_selftest)();
+ }
return -ENOTSUP;
}
@@ -211,13 +211,15 @@ extern "C" {
#endif
#include <rte_common.h>
+#include <rte_compat.h>
#include <rte_config.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
#include <rte_memory.h>
#include <rte_errno.h>
#include "rte_eventdev_trace_fp.h"
-struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;
/* Event device capability bitmap flags */
@@ -1111,6 +1113,44 @@
};
};
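+/** Sequence number carried by eventdev test events in an mbuf dynamic field. */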
+typedef uint32_t rte_event_test_seqn_t;
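+/** Offset of the test sequence number dynamic field, set at registration. */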
+extern int rte_event_test_seqn_dynfield_offset;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Read the test sequence number from an mbuf.
+ *
+ * The dynamic field must have been registered with
+ * rte_event_test_seqn_dynfield_register() beforehand.
+ *
+ * @param mbuf Mbuf to read the sequence number from.
+ * @return Pointer to the test sequence number.
+ */
+__rte_experimental
+static inline rte_event_test_seqn_t *
+rte_event_test_seqn(const struct rte_mbuf *mbuf)
+{
+ return RTE_MBUF_DYNFIELD(mbuf, rte_event_test_seqn_dynfield_offset,
+ rte_event_test_seqn_t *);
+}
+
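+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register the mbuf dynamic field used to store the test sequence number.
+ *
+ * @return
+ *   The offset of the registered dynamic field on success, negative on error.
+ */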
+__rte_experimental
+int
+rte_event_test_seqn_dynfield_register(void);
+
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< This flag is sent when the packet transfer mechanism is in HW.
@@ -138,4 +138,6 @@ EXPERIMENTAL {
__rte_eventdev_trace_port_setup;
# added in 20.11
rte_event_pmd_pci_probe_named;
+ rte_event_test_seqn_dynfield_offset;
+ rte_event_test_seqn_dynfield_register;
};