[v7,1/2] ethdev: make queue release callback optional
Checks
Commit Message
Some drivers don't need Rx and Tx queue release callback, make them
optional. Clean up empty queue release callbacks for some drivers.
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
---
app/test/virtual_pmd.c | 12 ----
drivers/net/af_packet/rte_eth_af_packet.c | 7 --
drivers/net/af_xdp/rte_eth_af_xdp.c | 7 --
drivers/net/dpaa/dpaa_ethdev.c | 13 ----
drivers/net/dpaa2/dpaa2_ethdev.c | 7 --
drivers/net/ipn3ke/ipn3ke_representor.c | 12 ----
drivers/net/kni/rte_eth_kni.c | 7 --
drivers/net/pcap/pcap_ethdev.c | 7 --
drivers/net/pfe/pfe_ethdev.c | 14 ----
drivers/net/ring/rte_eth_ring.c | 4 --
drivers/net/virtio/virtio_ethdev.c | 8 ---
lib/ethdev/rte_ethdev.c | 86 ++++++++++-------------
12 files changed, 36 insertions(+), 148 deletions(-)
Comments
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> -----Original Message-----
> From: Xueming Li <xuemingl@nvidia.com>
> Sent: Wednesday, October 6, 2021 4:48 PM
> To: dev@dpdk.org
> Cc: xuemingl@nvidia.com; Ferruh Yigit <ferruh.yigit@intel.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh Aman Deep
> <aman.deep.singh@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> John W. Linville <linville@tuxdriver.com>; Ciara Loftus
> <ciara.loftus@intel.com>; Qi Zhang <qi.z.zhang@intel.com>; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena (OSS)
> <sachin.saxena@oss.nxp.com>; Rosen Xu <rosen.xu@intel.com>;
> Gagandeep Singh <G.Singh@nxp.com>; Bruce Richardson
> <bruce.richardson@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Chenbo Xia <chenbo.xia@intel.com>
> Subject: [PATCH v7 1/2] ethdev: make queue release callback optional
> Importance: High
>
> Some drivers don't need Rx and Tx queue release callback, make them
> optional. Clean up empty queue release callbacks for some drivers.
>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Thomas Monjalon <thomas@monjalon.net>
> ---
> app/test/virtual_pmd.c | 12 ----
> drivers/net/af_packet/rte_eth_af_packet.c | 7 --
> drivers/net/af_xdp/rte_eth_af_xdp.c | 7 --
> drivers/net/dpaa/dpaa_ethdev.c | 13 ----
> drivers/net/dpaa2/dpaa2_ethdev.c | 7 --
> drivers/net/ipn3ke/ipn3ke_representor.c | 12 ----
> drivers/net/kni/rte_eth_kni.c | 7 --
> drivers/net/pcap/pcap_ethdev.c | 7 --
> drivers/net/pfe/pfe_ethdev.c | 14 ----
> drivers/net/ring/rte_eth_ring.c | 4 --
> drivers/net/virtio/virtio_ethdev.c | 8 ---
> lib/ethdev/rte_ethdev.c | 86 ++++++++++-------------
> 12 files changed, 36 insertions(+), 148 deletions(-)
>
> diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index
> 7036f401ed9..7e15b47eb0f 100644
> --- a/app/test/virtual_pmd.c
> +++ b/app/test/virtual_pmd.c
> @@ -163,16 +163,6 @@ virtual_ethdev_tx_queue_setup_fail(struct
> rte_eth_dev *dev __rte_unused,
> return -1;
> }
>
> -static void
> -virtual_ethdev_rx_queue_release(void *q __rte_unused)
> -{
> -}
> -
> -static void
> -virtual_ethdev_tx_queue_release(void *q __rte_unused)
> -{
> -}
> -
> static int
> virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
> int wait_to_complete __rte_unused)
> @@ -243,8 +233,6 @@ static const struct eth_dev_ops
> virtual_ethdev_default_dev_ops = {
> .dev_infos_get = virtual_ethdev_info_get,
> .rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
> .tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
> - .rx_queue_release = virtual_ethdev_rx_queue_release,
> - .tx_queue_release = virtual_ethdev_tx_queue_release,
> .link_update = virtual_ethdev_link_update_success,
> .mac_addr_set = virtual_ethdev_mac_address_set,
> .stats_get = virtual_ethdev_stats_get,
> diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
> index fcd80903995..c73d2ec5c86 100644
> --- a/drivers/net/af_packet/rte_eth_af_packet.c
> +++ b/drivers/net/af_packet/rte_eth_af_packet.c
> @@ -427,11 +427,6 @@ eth_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused)
> -{
> -}
> -
> static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused) @@ -594,8 +589,6 @@ static
> const struct eth_dev_ops ops = {
> .promiscuous_disable = eth_dev_promiscuous_disable,
> .rx_queue_setup = eth_rx_queue_setup,
> .tx_queue_setup = eth_tx_queue_setup,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c
> b/drivers/net/af_xdp/rte_eth_af_xdp.c
> index 9bea0a895a3..a619dd218d0 100644
> --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
> +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
> @@ -989,11 +989,6 @@ eth_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused)
> -{
> -}
> -
> static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused)
> @@ -1474,8 +1469,6 @@ static const struct eth_dev_ops ops = {
> .promiscuous_disable = eth_dev_promiscuous_disable,
> .rx_queue_setup = eth_rx_queue_setup,
> .tx_queue_setup = eth_tx_queue_setup,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c
> b/drivers/net/dpaa/dpaa_ethdev.c index 36d8f9249df..2c12956ff6b 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -1233,12 +1233,6 @@ dpaa_eth_eventq_detach(const struct
> rte_eth_dev *dev,
> return 0;
> }
>
> -static
> -void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
> -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static
> int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> uint16_t nb_desc __rte_unused,
> @@ -1272,11 +1266,6 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev
> *dev, uint16_t queue_idx,
> return 0;
> }
>
> -static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
> -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static uint32_t
> dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> { @@ -1571,8 +1560,6 @@ static struct eth_dev_ops dpaa_devops = {
>
> .rx_queue_setup = dpaa_eth_rx_queue_setup,
> .tx_queue_setup = dpaa_eth_tx_queue_setup,
> - .rx_queue_release = dpaa_eth_rx_queue_release,
> - .tx_queue_release = dpaa_eth_tx_queue_release,
> .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
> .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
> .rxq_info_get = dpaa_rxq_info_get,
> diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c
> b/drivers/net/dpaa2/dpaa2_ethdev.c
> index c12169578e2..48ffbf6c214 100644
> --- a/drivers/net/dpaa2/dpaa2_ethdev.c
> +++ b/drivers/net/dpaa2/dpaa2_ethdev.c
> @@ -1004,12 +1004,6 @@ dpaa2_dev_rx_queue_release(void *q
> __rte_unused)
> }
> }
>
> -static void
> -dpaa2_dev_tx_queue_release(void *q __rte_unused)
> -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static uint32_t
> dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> { @@ -2427,7 +2421,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
> .rx_queue_setup = dpaa2_dev_rx_queue_setup,
> .rx_queue_release = dpaa2_dev_rx_queue_release,
> .tx_queue_setup = dpaa2_dev_tx_queue_setup,
> - .tx_queue_release = dpaa2_dev_tx_queue_release,
> .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
> .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
> .flow_ctrl_get = dpaa2_flow_ctrl_get,
[Hemant]
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Hi
> -----Original Message-----
> From: Xueming Li <xuemingl@nvidia.com>
> Sent: Wednesday, October 06, 2021 19:18
> To: dev@dpdk.org
> Cc: xuemingl@nvidia.com; Yigit, Ferruh <ferruh.yigit@intel.com>; Andrew
> Rybchenko <andrew.rybchenko@oktetlabs.ru>; Singh, Aman Deep
> <aman.deep.singh@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> John W. Linville <linville@tuxdriver.com>; Loftus, Ciara
> <ciara.loftus@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Sachin Saxena
> <sachin.saxena@oss.nxp.com>; Xu, Rosen <rosen.xu@intel.com>;
> Gagandeep Singh <g.singh@nxp.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Xia, Chenbo <chenbo.xia@intel.com>
> Subject: [PATCH v7 1/2] ethdev: make queue release callback optional
>
> Some drivers don't need Rx and Tx queue release callback, make them
> optional. Clean up empty queue release callbacks for some drivers.
>
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Thomas Monjalon <thomas@monjalon.net>
> ---
> app/test/virtual_pmd.c | 12 ----
> drivers/net/af_packet/rte_eth_af_packet.c | 7 --
> drivers/net/af_xdp/rte_eth_af_xdp.c | 7 --
> drivers/net/dpaa/dpaa_ethdev.c | 13 ----
> drivers/net/dpaa2/dpaa2_ethdev.c | 7 --
> drivers/net/ipn3ke/ipn3ke_representor.c | 12 ----
> drivers/net/kni/rte_eth_kni.c | 7 --
> drivers/net/pcap/pcap_ethdev.c | 7 --
> drivers/net/pfe/pfe_ethdev.c | 14 ----
> drivers/net/ring/rte_eth_ring.c | 4 --
> drivers/net/virtio/virtio_ethdev.c | 8 ---
> lib/ethdev/rte_ethdev.c | 86 ++++++++++-------------
> 12 files changed, 36 insertions(+), 148 deletions(-)
>
> diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index
> 7036f401ed9..7e15b47eb0f 100644
> --- a/app/test/virtual_pmd.c
> +++ b/app/test/virtual_pmd.c
> @@ -163,16 +163,6 @@ virtual_ethdev_tx_queue_setup_fail(struct
> rte_eth_dev *dev __rte_unused,
> return -1;
> }
>
> -static void
> -virtual_ethdev_rx_queue_release(void *q __rte_unused) -{ -}
> -
> -static void
> -virtual_ethdev_tx_queue_release(void *q __rte_unused) -{ -}
> -
> static int
> virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
> int wait_to_complete __rte_unused)
> @@ -243,8 +233,6 @@ static const struct eth_dev_ops
> virtual_ethdev_default_dev_ops = {
> .dev_infos_get = virtual_ethdev_info_get,
> .rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
> .tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
> - .rx_queue_release = virtual_ethdev_rx_queue_release,
> - .tx_queue_release = virtual_ethdev_tx_queue_release,
> .link_update = virtual_ethdev_link_update_success,
> .mac_addr_set = virtual_ethdev_mac_address_set,
> .stats_get = virtual_ethdev_stats_get, diff --git
> a/drivers/net/af_packet/rte_eth_af_packet.c
> b/drivers/net/af_packet/rte_eth_af_packet.c
> index fcd80903995..c73d2ec5c86 100644
> --- a/drivers/net/af_packet/rte_eth_af_packet.c
> +++ b/drivers/net/af_packet/rte_eth_af_packet.c
> @@ -427,11 +427,6 @@ eth_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused) -{ -}
> -
> static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused) @@ -594,8 +589,6 @@ static
> const struct eth_dev_ops ops = {
> .promiscuous_disable = eth_dev_promiscuous_disable,
> .rx_queue_setup = eth_rx_queue_setup,
> .tx_queue_setup = eth_tx_queue_setup,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c
> b/drivers/net/af_xdp/rte_eth_af_xdp.c
> index 9bea0a895a3..a619dd218d0 100644
> --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
> +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
> @@ -989,11 +989,6 @@ eth_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused) -{ -}
> -
> static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused)
> @@ -1474,8 +1469,6 @@ static const struct eth_dev_ops ops = {
> .promiscuous_disable = eth_dev_promiscuous_disable,
> .rx_queue_setup = eth_rx_queue_setup,
> .tx_queue_setup = eth_tx_queue_setup,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/dpaa/dpaa_ethdev.c
> b/drivers/net/dpaa/dpaa_ethdev.c index 36d8f9249df..2c12956ff6b 100644
> --- a/drivers/net/dpaa/dpaa_ethdev.c
> +++ b/drivers/net/dpaa/dpaa_ethdev.c
> @@ -1233,12 +1233,6 @@ dpaa_eth_eventq_detach(const struct
> rte_eth_dev *dev,
> return 0;
> }
>
> -static
> -void dpaa_eth_rx_queue_release(void *rxq __rte_unused) -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static
> int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> uint16_t nb_desc __rte_unused,
> @@ -1272,11 +1266,6 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev
> *dev, uint16_t queue_idx,
> return 0;
> }
>
> -static void dpaa_eth_tx_queue_release(void *txq __rte_unused) -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static uint32_t
> dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> { @@ -1571,8 +1560,6 @@ static struct eth_dev_ops dpaa_devops = {
>
> .rx_queue_setup = dpaa_eth_rx_queue_setup,
> .tx_queue_setup = dpaa_eth_tx_queue_setup,
> - .rx_queue_release = dpaa_eth_rx_queue_release,
> - .tx_queue_release = dpaa_eth_tx_queue_release,
> .rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
> .tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
> .rxq_info_get = dpaa_rxq_info_get,
> diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c
> b/drivers/net/dpaa2/dpaa2_ethdev.c
> index c12169578e2..48ffbf6c214 100644
> --- a/drivers/net/dpaa2/dpaa2_ethdev.c
> +++ b/drivers/net/dpaa2/dpaa2_ethdev.c
> @@ -1004,12 +1004,6 @@ dpaa2_dev_rx_queue_release(void *q
> __rte_unused)
> }
> }
>
> -static void
> -dpaa2_dev_tx_queue_release(void *q __rte_unused) -{
> - PMD_INIT_FUNC_TRACE();
> -}
> -
> static uint32_t
> dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> { @@ -2427,7 +2421,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
> .rx_queue_setup = dpaa2_dev_rx_queue_setup,
> .rx_queue_release = dpaa2_dev_rx_queue_release,
> .tx_queue_setup = dpaa2_dev_tx_queue_setup,
> - .tx_queue_release = dpaa2_dev_tx_queue_release,
> .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
> .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
> .flow_ctrl_get = dpaa2_flow_ctrl_get,
> diff --git a/drivers/net/ipn3ke/ipn3ke_representor.c
> b/drivers/net/ipn3ke/ipn3ke_representor.c
> index 589d9fa5877..694435a4ae2 100644
> --- a/drivers/net/ipn3ke/ipn3ke_representor.c
> +++ b/drivers/net/ipn3ke/ipn3ke_representor.c
> @@ -288,11 +288,6 @@ ipn3ke_rpst_rx_queue_setup(__rte_unused struct
> rte_eth_dev *dev,
> return 0;
> }
>
> -static void
> -ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
> -{
> -}
> -
> static int
> ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
> __rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
> @@ -302,11 +297,6 @@ ipn3ke_rpst_tx_queue_setup(__rte_unused struct
> rte_eth_dev *dev,
> return 0;
> }
>
> -static void
> -ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
> -{
> -}
> -
> /* Statistics collected by each port, VSI, VEB, and S-channel */ struct
> ipn3ke_rpst_eth_stats {
> uint64_t tx_bytes; /* gotc */
> @@ -2865,9 +2855,7 @@ static const struct eth_dev_ops
> ipn3ke_rpst_dev_ops = {
> .tx_queue_start = ipn3ke_rpst_tx_queue_start,
> .tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
> .rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
> - .rx_queue_release = ipn3ke_rpst_rx_queue_release,
> .tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
> - .tx_queue_release = ipn3ke_rpst_tx_queue_release,
>
> .dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
> .dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
> diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
> index 871d11c4133..cb9f7c8e820 100644
> --- a/drivers/net/kni/rte_eth_kni.c
> +++ b/drivers/net/kni/rte_eth_kni.c
> @@ -284,11 +284,6 @@ eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
> return 0;
> }
>
> -static void
> -eth_kni_queue_release(void *q __rte_unused)
> -{
> -}
> -
> static int
> eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused)
> @@ -362,8 +357,6 @@ static const struct eth_dev_ops eth_kni_ops = {
> .dev_infos_get = eth_kni_dev_info,
> .rx_queue_setup = eth_kni_rx_queue_setup,
> .tx_queue_setup = eth_kni_tx_queue_setup,
> - .rx_queue_release = eth_kni_queue_release,
> - .tx_queue_release = eth_kni_queue_release,
> .link_update = eth_kni_link_update,
> .stats_get = eth_kni_stats_get,
> .stats_reset = eth_kni_stats_reset,
> diff --git a/drivers/net/pcap/pcap_ethdev.c
> b/drivers/net/pcap/pcap_ethdev.c index 3566aea0105..d695c5eef7b 100644
> --- a/drivers/net/pcap/pcap_ethdev.c
> +++ b/drivers/net/pcap/pcap_ethdev.c
> @@ -857,11 +857,6 @@ eth_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused)
> -{
> -}
> -
> static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused)
> @@ -1006,8 +1001,6 @@ static const struct eth_dev_ops ops = {
> .tx_queue_start = eth_tx_queue_start,
> .rx_queue_stop = eth_rx_queue_stop,
> .tx_queue_stop = eth_tx_queue_stop,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
> index feec4d10a26..4c7f568bf42 100644
> --- a/drivers/net/pfe/pfe_ethdev.c
> +++ b/drivers/net/pfe/pfe_ethdev.c
> @@ -494,18 +494,6 @@ pfe_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> return 0;
> }
>
> -static void
> -pfe_rx_queue_release(void *q __rte_unused)
> -{
> -	PMD_INIT_FUNC_TRACE();
> -}
> -
> -static void
> -pfe_tx_queue_release(void *q __rte_unused)
> -{
> -	PMD_INIT_FUNC_TRACE();
> -}
> -
> static int
> pfe_tx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> @@ -759,9 +747,7 @@ static const struct eth_dev_ops ops = {
> .dev_configure = pfe_eth_configure,
> .dev_infos_get = pfe_eth_info,
> .rx_queue_setup = pfe_rx_queue_setup,
> - .rx_queue_release = pfe_rx_queue_release,
> .tx_queue_setup = pfe_tx_queue_setup,
> - .tx_queue_release = pfe_tx_queue_release,
> .dev_supported_ptypes_get = pfe_supported_ptypes_get,
> .link_update = pfe_eth_link_update,
> .promiscuous_enable = pfe_promiscuous_enable,
> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
> index 1faf38a714c..0440019e07e 100644
> --- a/drivers/net/ring/rte_eth_ring.c
> +++ b/drivers/net/ring/rte_eth_ring.c
> @@ -225,8 +225,6 @@ eth_mac_addr_add(struct rte_eth_dev *dev
> __rte_unused,
> return 0;
> }
>
> -static void
> -eth_queue_release(void *q __rte_unused) { ; } static int
> eth_link_update(struct rte_eth_dev *dev __rte_unused,
> int wait_to_complete __rte_unused) { return 0; } @@ -272,8
> +270,6 @@ static const struct eth_dev_ops ops = {
> .dev_infos_get = eth_dev_info,
> .rx_queue_setup = eth_rx_queue_setup,
> .tx_queue_setup = eth_tx_queue_setup,
> - .rx_queue_release = eth_queue_release,
> - .tx_queue_release = eth_queue_release,
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index b60eeb24abe..6aa36b3f394 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -370,12 +370,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev,
> uint16_t nb_queues)
> return 0;
> }
>
> -static void
> -virtio_dev_queue_release(void *queue __rte_unused)
> -{
> - /* do nothing */
> -}
> -
> static uint16_t
> virtio_get_nr_vq(struct virtio_hw *hw)
> {
> @@ -981,9 +975,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops =
> {
> .rx_queue_setup = virtio_dev_rx_queue_setup,
> .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
> .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
> - .rx_queue_release = virtio_dev_queue_release,
> .tx_queue_setup = virtio_dev_tx_queue_setup,
> - .tx_queue_release = virtio_dev_queue_release,
> /* collect stats per queue */
> .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
> .vlan_filter_set = virtio_vlan_filter_set,
> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index
> daf5ca92422..4439ad336e2 100644
> --- a/lib/ethdev/rte_ethdev.c
> +++ b/lib/ethdev/rte_ethdev.c
> @@ -889,6 +889,32 @@ eth_err(uint16_t port_id, int ret)
> return ret;
> }
>
> +static void
> +eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
> +{
> + void **rxq = dev->data->rx_queues;
> +
> + if (rxq[qid] == NULL)
> + return;
> +
> + if (dev->dev_ops->rx_queue_release != NULL)
> + (*dev->dev_ops->rx_queue_release)(rxq[qid]);
> + rxq[qid] = NULL;
> +}
> +
> +static void
> +eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
> +{
> + void **txq = dev->data->tx_queues;
> +
> + if (txq[qid] == NULL)
> + return;
> +
> + if (dev->dev_ops->tx_queue_release != NULL)
> + (*dev->dev_ops->tx_queue_release)(txq[qid]);
> + txq[qid] = NULL;
> +}
> +
> static int
> eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
> { @@ -905,12 +931,10 @@ eth_dev_rx_queue_config(struct rte_eth_dev
> *dev, uint16_t nb_queues)
> return -(ENOMEM);
> }
> } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-
> configure */
> -	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
> + for (i = nb_queues; i < old_nb_queues; i++)
> + eth_dev_rxq_release(dev, i);
>
> rxq = dev->data->rx_queues;
> -
> - for (i = nb_queues; i < old_nb_queues; i++)
> - (*dev->dev_ops->rx_queue_release)(rxq[i]);
> rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
> RTE_CACHE_LINE_SIZE);
> if (rxq == NULL)
> @@ -925,12 +949,8 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev,
> uint16_t nb_queues)
> dev->data->rx_queues = rxq;
>
> } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
> -	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
> -
> - rxq = dev->data->rx_queues;
> -
> for (i = nb_queues; i < old_nb_queues; i++)
> - (*dev->dev_ops->rx_queue_release)(rxq[i]);
> + eth_dev_rxq_release(dev, i);
>
> rte_free(dev->data->rx_queues);
> dev->data->rx_queues = NULL;
> @@ -1145,12 +1165,10 @@ eth_dev_tx_queue_config(struct rte_eth_dev
> *dev, uint16_t nb_queues)
> return -(ENOMEM);
> }
> } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-
> configure */
> -	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
> + for (i = nb_queues; i < old_nb_queues; i++)
> + eth_dev_txq_release(dev, i);
>
> txq = dev->data->tx_queues;
> -
> - for (i = nb_queues; i < old_nb_queues; i++)
> - (*dev->dev_ops->tx_queue_release)(txq[i]);
> txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
> RTE_CACHE_LINE_SIZE);
> if (txq == NULL)
> @@ -1165,12 +1183,8 @@ eth_dev_tx_queue_config(struct rte_eth_dev
> *dev, uint16_t nb_queues)
> dev->data->tx_queues = txq;
>
> } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
> -	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
> -
> - txq = dev->data->tx_queues;
> -
> for (i = nb_queues; i < old_nb_queues; i++)
> - (*dev->dev_ops->tx_queue_release)(txq[i]);
> + eth_dev_txq_release(dev, i);
>
> rte_free(dev->data->tx_queues);
> dev->data->tx_queues = NULL;
> @@ -2006,7 +2020,6 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
> struct rte_eth_dev *dev;
> struct rte_eth_dev_info dev_info;
> struct rte_eth_rxconf local_conf;
> - void **rxq;
>
> RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
> dev = &rte_eth_devices[port_id];
> @@ -2110,13 +2123,7 @@ rte_eth_rx_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
> RTE_ETH_QUEUE_STATE_STOPPED))
> return -EBUSY;
>
> - rxq = dev->data->rx_queues;
> - if (rxq[rx_queue_id]) {
> -		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
> -					-ENOTSUP);
> - (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
> - rxq[rx_queue_id] = NULL;
> - }
> + eth_dev_rxq_release(dev, rx_queue_id);
>
> if (rx_conf == NULL)
> rx_conf = &dev_info.default_rxconf;
> @@ -2189,7 +2196,6 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id,
> uint16_t rx_queue_id,
> int ret;
> struct rte_eth_dev *dev;
> struct rte_eth_hairpin_cap cap;
> - void **rxq;
> int i;
> int count;
>
> @@ -2246,13 +2252,7 @@ rte_eth_rx_hairpin_queue_setup(uint16_t
> port_id, uint16_t rx_queue_id,
> }
> if (dev->data->dev_started)
> return -EBUSY;
> - rxq = dev->data->rx_queues;
> - if (rxq[rx_queue_id] != NULL) {
> -		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
> -					-ENOTSUP);
> - (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
> - rxq[rx_queue_id] = NULL;
> - }
> + eth_dev_rxq_release(dev, rx_queue_id);
> ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
> nb_rx_desc, conf);
> if (ret == 0)
> @@ -2269,7 +2269,6 @@ rte_eth_tx_queue_setup(uint16_t port_id,
> uint16_t tx_queue_id,
> struct rte_eth_dev *dev;
> struct rte_eth_dev_info dev_info;
> struct rte_eth_txconf local_conf;
> - void **txq;
> int ret;
>
> RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); @@ -
> 2314,13 +2313,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t
> tx_queue_id,
> RTE_ETH_QUEUE_STATE_STOPPED))
> return -EBUSY;
>
> - txq = dev->data->tx_queues;
> - if (txq[tx_queue_id]) {
> -		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
> -					-ENOTSUP);
> - (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
> - txq[tx_queue_id] = NULL;
> - }
> + eth_dev_txq_release(dev, tx_queue_id);
>
> if (tx_conf == NULL)
> tx_conf = &dev_info.default_txconf;
> @@ -2368,7 +2361,6 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id,
> uint16_t tx_queue_id, {
> struct rte_eth_dev *dev;
> struct rte_eth_hairpin_cap cap;
> - void **txq;
> int i;
> int count;
> int ret;
> @@ -2426,13 +2418,7 @@ rte_eth_tx_hairpin_queue_setup(uint16_t
> port_id, uint16_t tx_queue_id,
> }
> if (dev->data->dev_started)
> return -EBUSY;
> - txq = dev->data->tx_queues;
> - if (txq[tx_queue_id] != NULL) {
> -		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
> -					-ENOTSUP);
> - (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
> - txq[tx_queue_id] = NULL;
> - }
> + eth_dev_txq_release(dev, tx_queue_id);
> ret = (*dev->dev_ops->tx_hairpin_queue_setup)
> (dev, tx_queue_id, nb_tx_desc, conf);
> if (ret == 0)
> --
> 2.33.0
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
@@ -163,16 +163,6 @@ virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
return -1;
}
-static void
-virtual_ethdev_rx_queue_release(void *q __rte_unused)
-{
-}
-
-static void
-virtual_ethdev_tx_queue_release(void *q __rte_unused)
-{
-}
-
static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
int wait_to_complete __rte_unused)
@@ -243,8 +233,6 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
.dev_infos_get = virtual_ethdev_info_get,
.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
- .rx_queue_release = virtual_ethdev_rx_queue_release,
- .tx_queue_release = virtual_ethdev_tx_queue_release,
.link_update = virtual_ethdev_link_update_success,
.mac_addr_set = virtual_ethdev_mac_address_set,
.stats_get = virtual_ethdev_stats_get,
@@ -427,11 +427,6 @@ eth_dev_close(struct rte_eth_dev *dev)
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
@@ -594,8 +589,6 @@ static const struct eth_dev_ops ops = {
.promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -989,11 +989,6 @@ eth_dev_close(struct rte_eth_dev *dev)
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
@@ -1474,8 +1469,6 @@ static const struct eth_dev_ops ops = {
.promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -1233,12 +1233,6 @@ dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
return 0;
}
-static
-void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
@@ -1272,11 +1266,6 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
-static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1571,8 +1560,6 @@ static struct eth_dev_ops dpaa_devops = {
.rx_queue_setup = dpaa_eth_rx_queue_setup,
.tx_queue_setup = dpaa_eth_tx_queue_setup,
- .rx_queue_release = dpaa_eth_rx_queue_release,
- .tx_queue_release = dpaa_eth_tx_queue_release,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
.rxq_info_get = dpaa_rxq_info_get,
@@ -1004,12 +1004,6 @@ dpaa2_dev_rx_queue_release(void *q __rte_unused)
}
}
-static void
-dpaa2_dev_tx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -2427,7 +2421,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.rx_queue_setup = dpaa2_dev_rx_queue_setup,
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
- .tx_queue_release = dpaa2_dev_tx_queue_release,
.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
@@ -288,11 +288,6 @@ ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
return 0;
}
-static void
-ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
-{
-}
-
static int
ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
@@ -302,11 +297,6 @@ ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
return 0;
}
-static void
-ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
-{
-}
-
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct ipn3ke_rpst_eth_stats {
uint64_t tx_bytes; /* gotc */
@@ -2865,9 +2855,7 @@ static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
.tx_queue_start = ipn3ke_rpst_tx_queue_start,
.tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
.rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
- .rx_queue_release = ipn3ke_rpst_rx_queue_release,
.tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
- .tx_queue_release = ipn3ke_rpst_tx_queue_release,
.dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
.dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
@@ -284,11 +284,6 @@ eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-static void
-eth_kni_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
@@ -362,8 +357,6 @@ static const struct eth_dev_ops eth_kni_ops = {
.dev_infos_get = eth_kni_dev_info,
.rx_queue_setup = eth_kni_rx_queue_setup,
.tx_queue_setup = eth_kni_tx_queue_setup,
- .rx_queue_release = eth_kni_queue_release,
- .tx_queue_release = eth_kni_queue_release,
.link_update = eth_kni_link_update,
.stats_get = eth_kni_stats_get,
.stats_reset = eth_kni_stats_reset,
@@ -857,11 +857,6 @@ eth_dev_close(struct rte_eth_dev *dev)
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused)
@@ -1006,8 +1001,6 @@ static const struct eth_dev_ops ops = {
.tx_queue_start = eth_tx_queue_start,
.rx_queue_stop = eth_rx_queue_stop,
.tx_queue_stop = eth_tx_queue_stop,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -494,18 +494,6 @@ pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
-static void
-pfe_rx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
-static void
-pfe_tx_queue_release(void *q __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -759,9 +747,7 @@ static const struct eth_dev_ops ops = {
.dev_configure = pfe_eth_configure,
.dev_infos_get = pfe_eth_info,
.rx_queue_setup = pfe_rx_queue_setup,
- .rx_queue_release = pfe_rx_queue_release,
.tx_queue_setup = pfe_tx_queue_setup,
- .tx_queue_release = pfe_tx_queue_release,
.dev_supported_ptypes_get = pfe_supported_ptypes_get,
.link_update = pfe_eth_link_update,
.promiscuous_enable = pfe_promiscuous_enable,
@@ -225,8 +225,6 @@ eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
return 0;
}
-static void
-eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
int wait_to_complete __rte_unused) { return 0; }
@@ -272,8 +270,6 @@ static const struct eth_dev_ops ops = {
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -370,12 +370,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
return 0;
}
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
- /* do nothing */
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
@@ -981,9 +975,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
- .rx_queue_release = virtio_dev_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- .tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
.vlan_filter_set = virtio_vlan_filter_set,
@@ -889,6 +889,32 @@ eth_err(uint16_t port_id, int ret)
return ret;
}
+static void
+eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **rxq = dev->data->rx_queues;
+
+ if (rxq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->rx_queue_release != NULL)
+ (*dev->dev_ops->rx_queue_release)(rxq[qid]);
+ rxq[qid] = NULL;
+}
+
+static void
+eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ void **txq = dev->data->tx_queues;
+
+ if (txq[qid] == NULL)
+ return;
+
+ if (dev->dev_ops->tx_queue_release != NULL)
+ (*dev->dev_ops->tx_queue_release)(txq[qid]);
+ txq[qid] = NULL;
+}
+
static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
@@ -905,12 +931,10 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return -(ENOMEM);
}
} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+ for (i = nb_queues; i < old_nb_queues; i++)
+ eth_dev_rxq_release(dev, i);
rxq = dev->data->rx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
@@ -925,12 +949,8 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
dev->data->rx_queues = rxq;
} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- rxq = dev->data->rx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ eth_dev_rxq_release(dev, i);
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
@@ -1145,12 +1165,10 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return -(ENOMEM);
}
} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+ for (i = nb_queues; i < old_nb_queues; i++)
+ eth_dev_txq_release(dev, i);
txq = dev->data->tx_queues;
-
- for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
@@ -1165,12 +1183,8 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
dev->data->tx_queues = txq;
} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- txq = dev->data->tx_queues;
-
for (i = nb_queues; i < old_nb_queues; i++)
- (*dev->dev_ops->tx_queue_release)(txq[i]);
+ eth_dev_txq_release(dev, i);
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
@@ -2006,7 +2020,6 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf local_conf;
- void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2110,13 +2123,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
@@ -2189,7 +2196,6 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
int ret;
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **rxq;
int i;
int count;
@@ -2246,13 +2252,7 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
}
if (dev->data->dev_started)
return -EBUSY;
- rxq = dev->data->rx_queues;
- if (rxq[rx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
- rxq[rx_queue_id] = NULL;
- }
+ eth_dev_rxq_release(dev, rx_queue_id);
ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
nb_rx_desc, conf);
if (ret == 0)
@@ -2269,7 +2269,6 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf local_conf;
- void **txq;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -2314,13 +2313,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
RTE_ETH_QUEUE_STATE_STOPPED))
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id]) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
@@ -2368,7 +2361,6 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
{
struct rte_eth_dev *dev;
struct rte_eth_hairpin_cap cap;
- void **txq;
int i;
int count;
int ret;
@@ -2426,13 +2418,7 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
}
if (dev->data->dev_started)
return -EBUSY;
- txq = dev->data->tx_queues;
- if (txq[tx_queue_id] != NULL) {
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
- -ENOTSUP);
- (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
- txq[tx_queue_id] = NULL;
- }
+ eth_dev_txq_release(dev, tx_queue_id);
ret = (*dev->dev_ops->tx_hairpin_queue_setup)
(dev, tx_queue_id, nb_tx_desc, conf);
if (ret == 0)