[dpdk-dev,v2,2/4] net/dpaa2: add APIs to support event eth adapter
Checks
Commit Message
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 67 +++++++++++++++++++++++++++++
drivers/net/dpaa2/dpaa2_ethdev.h | 15 +++++++
drivers/net/dpaa2/dpaa2_rxtx.c | 20 +++++++++
drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 7 +++
4 files changed, 109 insertions(+)
Comments
On 10/12/2017 10:48 PM, Nipun Gupta wrote:
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> ---
> drivers/net/dpaa2/dpaa2_ethdev.c | 67 +++++++++++++++++++++++++++++
> drivers/net/dpaa2/dpaa2_ethdev.h | 15 +++++++
> drivers/net/dpaa2/dpaa2_rxtx.c | 20 +++++++++
> drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 7 +++
> 4 files changed, 109 insertions(+)
>
> diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
> index 39c32b3..724719a 100644
> --- a/drivers/net/dpaa2/dpaa2_ethdev.c
> +++ b/drivers/net/dpaa2/dpaa2_ethdev.c
> @@ -1634,6 +1634,73 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
> return 0;
> }
>
> +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
> + int eth_rx_queue_id,
> + uint16_t dpcon_id,
> + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
> +{
> + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
> + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
> + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
> + uint8_t flow_id = dpaa2_ethq->flow_id;
> +
> + struct dpni_queue cfg;
> + uint8_t options;
> + int ret;
> +
> + memset(&cfg, 0, sizeof(struct dpni_queue));
> + options = DPNI_QUEUE_OPT_DEST;
> + cfg.destination.type = DPNI_DEST_DPCON;
> + cfg.destination.id = dpcon_id;
> + cfg.destination.priority = queue_conf->ev.priority;
> +
> + options |= DPNI_QUEUE_OPT_USER_CTX;
> + cfg.user_context = (uint64_t)(dpaa2_ethq);
> +
> + ret = dpni_set_queue(dpni, CMD_PRI_LOW,
> + eth_priv->token, DPNI_QUEUE_RX,
> + dpaa2_ethq->tc_index, flow_id, options, &cfg);
> + if (ret) {
> + RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
> + return ret;
> + }
> +
> + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
> + dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
> + else
> + return -1;
> +
Shouldn't you check whether the scheduling type is supported at the start of the function?
That way you would not perform the hardware "dpni_set_queue" call for an unsupported case.
> + memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
> +
> + return 0;
> +}
> +
> +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
> + int eth_rx_queue_id)
> +{
> + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
> + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
> + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
> + uint8_t flow_id = dpaa2_ethq->flow_id;
> + struct dpni_queue cfg;
> + uint8_t options;
> + int ret;
> +
> + memset(&cfg, 0, sizeof(struct dpni_queue));
> + options = DPNI_QUEUE_OPT_DEST;
> + cfg.destination.type = DPNI_DEST_NONE;
> +
> + ret = dpni_set_queue(dpni, CMD_PRI_LOW,
> + eth_priv->token, DPNI_QUEUE_RX,
> + dpaa2_ethq->tc_index, flow_id, options, &cfg);
> + if (ret) {
> + RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
> + return ret;
> + }
> +
> + return 0;
Trivial one:
you can simply "return ret" in both the success and failure cases.
> +}
> +
> static struct eth_dev_ops dpaa2_ethdev_ops = {
> .dev_configure = dpaa2_eth_dev_configure,
> .dev_start = dpaa2_dev_start,
> diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
> index 7b14ae0..b8e94aa 100644
> --- a/drivers/net/dpaa2/dpaa2_ethdev.h
> +++ b/drivers/net/dpaa2/dpaa2_ethdev.h
> @@ -34,6 +34,8 @@
> #ifndef _DPAA2_ETHDEV_H
> #define _DPAA2_ETHDEV_H
>
> +#include <rte_event_eth_rx_adapter.h>
> +
> #include <mc/fsl_dpni.h>
> #include <mc/fsl_mc_sys.h>
>
> @@ -100,8 +102,21 @@ int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
>
> int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
>
> +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
> + int eth_rx_queue_id,
> + uint16_t dpcon_id,
> + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
> +
> +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
> + int eth_rx_queue_id);
> +
> uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
> uint16_t nb_pkts);
> +void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
> + const struct qbman_fd *fd,
> + const struct qbman_result *dq,
> + struct dpaa2_queue *rxq,
> + struct rte_event *ev);
> uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
> uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
> #endif /* _DPAA2_ETHDEV_H */
> diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
> index 7cfa73a..a317f7f 100644
> --- a/drivers/net/dpaa2/dpaa2_rxtx.c
> +++ b/drivers/net/dpaa2/dpaa2_rxtx.c
> @@ -514,6 +514,26 @@ static inline int __attribute__((hot))
> return num_rx;
> }
>
> +void __attribute__((hot))
> +dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
> + const struct qbman_fd *fd,
> + const struct qbman_result *dq,
> + struct dpaa2_queue *rxq,
> + struct rte_event *ev)
> +{
> + ev->mbuf = eth_fd_to_mbuf(fd);
> +
> + ev->flow_id = rxq->ev.flow_id;
> + ev->sub_event_type = rxq->ev.sub_event_type;
> + ev->event_type = RTE_EVENT_TYPE_ETHDEV;
> + ev->op = RTE_EVENT_OP_NEW;
> + ev->sched_type = rxq->ev.sched_type;
> + ev->queue_id = rxq->ev.queue_id;
> + ev->priority = rxq->ev.priority;
> +
> + qbman_swp_dqrr_consume(swp, dq);
> +}
> +
> /*
> * Callback to handle sending packets through WRIOP based interface
> */
> diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
> index 8591cc0..b741bc0 100644
> --- a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
> +++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
> @@ -2,3 +2,10 @@ DPDK_17.05 {
>
> local: *;
> };
> +
> +DPDK_17.11 {
> + global:
> +
> + dpaa2_eth_eventq_attach;
> + dpaa2_eth_eventq_detach;
> +};
>
@@ -1634,6 +1634,73 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
return 0;
}
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_DPCON;
+ cfg.destination.id = dpcon_id;
+ cfg.destination.priority = queue_conf->ev.priority;
+
+ options |= DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (uint64_t)(dpaa2_ethq);
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW,
+ eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+ dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+ else
+ return -1;
+
+ memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_NONE;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW,
+ eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
@@ -34,6 +34,8 @@
#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H
+#include <rte_event_eth_rx_adapter.h>
+
#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>
@@ -100,8 +102,21 @@ int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id);
+
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
+void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
#endif /* _DPAA2_ETHDEV_H */
@@ -514,6 +514,26 @@ static inline int __attribute__((hot))
return num_rx;
}
+void __attribute__((hot))
+dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/
@@ -2,3 +2,10 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_eth_eventq_attach;
+ dpaa2_eth_eventq_detach;
+};