[v9,09/17] graph: add structure for stream moving between cores
Checks
Commit Message
Add graph_mcore_dispatch_wq_node to hold graph scheduling workqueue
node.
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Signed-off-by: Cunming Liang <cunming.liang@intel.com>
Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
---
lib/graph/graph.c | 2 ++
lib/graph/graph_populate.c | 1 +
lib/graph/graph_private.h | 12 ++++++++++++
lib/graph/rte_graph_worker_common.h | 29 +++++++++++++++++++++++++++++
4 files changed, 44 insertions(+)
@@ -289,6 +289,7 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
RTE_ASSERT(graph->graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH);
graph->lcore_id = lcore;
+ graph->graph->dispatch.lcore_id = graph->lcore_id;
graph->socket = rte_lcore_to_socket_id(lcore);
/* check the availability of source node */
@@ -312,6 +313,7 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
break;
graph->lcore_id = RTE_MAX_LCORE;
+ graph->graph->dispatch.lcore_id = RTE_MAX_LCORE;
fail:
return;
@@ -89,6 +89,7 @@ graph_nodes_populate(struct graph *_graph)
}
node->id = graph_node->node->id;
node->parent_id = pid;
+ node->dispatch.lcore_id = graph_node->node->lcore_id;
nb_edges = graph_node->node->nb_edges;
node->nb_edges = nb_edges;
off += sizeof(struct rte_node);
@@ -64,6 +64,18 @@ struct node {
char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
};
+/**
+ * @internal
+ *
+ * Structure that holds the graph scheduling workqueue node stream.
+ * Used for mcore dispatch model.
+ */
+struct graph_mcore_dispatch_wq_node {
+ rte_graph_off_t node_off; /**< Offset of the destination node in the graph. */
+ uint16_t nb_objs; /**< Number of pending objects in the stream. */
+ void *objs[RTE_GRAPH_BURST_SIZE]; /**< Pending object stream. */
+} __rte_cache_aligned;
+
/**
* @internal
*
@@ -36,12 +36,20 @@ extern "C" {
#define RTE_GRAPH_MODEL_MCORE_DISPATCH 1
/**< Dispatch model to support cross-core dispatching within core affinity. */
+/**
+ * @internal
+ *
+ * Singly-linked list head for graph schedule run-queue.
+ */
+SLIST_HEAD(rte_graph_rq_head, rte_graph);
+
/**
* @internal
*
* Data structure to hold graph data.
*/
struct rte_graph {
+ /* Fast path area. */
uint32_t tail; /**< Tail of circular buffer. */
uint32_t head; /**< Head of circular buffer. */
uint32_t cir_mask; /**< Circular buffer wrap around mask. */
@@ -49,6 +57,20 @@ struct rte_graph {
rte_graph_off_t *cir_start; /**< Pointer to circular buffer. */
rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
uint32_t model; /**< graph model */
+ RTE_STD_C11
+ union {
+ /* Fast schedule area for mcore dispatch model */
+ struct {
+ struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
+ struct rte_graph_rq_head rq_head; /* The head for run-queue list */
+
+ unsigned int lcore_id; /**< The graph running lcore. */
+ struct rte_ring *wq; /**< The work-queue for pending streams. */
+ struct rte_mempool *mp; /**< The mempool for scheduling streams. */
+ } dispatch; /**< Only used by the dispatch model. */
+ };
+ SLIST_ENTRY(rte_graph) next; /* The next for rte_graph list */
+ /* End of Fast path area. */
rte_graph_t id; /**< Graph identifier. */
int socket; /**< Socket ID where memory is allocated. */
char name[RTE_GRAPH_NAMESIZE]; /**< Name of the graph. */
@@ -81,6 +103,13 @@ struct rte_node {
/** Original process function when pcap is enabled. */
rte_node_process_t original_process;
+ RTE_STD_C11
+ union {
+ /* Fast schedule area for mcore dispatch model */
+ struct {
+ unsigned int lcore_id; /**< Node running lcore. */
+ } dispatch;
+ };
/* Fast path area */
#define RTE_NODE_CTX_SZ 16
uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */