[2/2] ethdev: cleanup shared data with the last port
Checks
Commit Message
If no port is allocated, ethdev (from a primary process) can release the
memzone used to store port data.
This makes it possible for the DPDK memory allocator to release
associated resources back to the OS.
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
lib/ethdev/ethdev_driver.c | 6 ++++++
lib/ethdev/ethdev_private.c | 16 +++++++++++++++-
lib/ethdev/ethdev_private.h | 3 +++
3 files changed, 24 insertions(+), 1 deletion(-)
Comments
On Fri, Aug 18, 2023 at 11:13 AM David Marchand
<david.marchand@redhat.com> wrote:
> @@ -253,6 +255,10 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
> rte_free(eth_dev->data->dev_private);
> pthread_mutex_destroy(ð_dev->data->flow_ops_mutex);
> memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
At device cleanup stage (rte_eal_cleanup -> bus -> device),
eth_dev_allocated() may still be called and use a leftover reference
to (freed) data.
So here, we need to reset it to NULL (caught by ASan with test-null.sh
in the CI).
This will be in v2.
> +
> + eth_dev_shared_data->allocated_count--;
> + if (eth_dev_shared_data->allocated_count == 0)
> + eth_dev_shared_data_release();
> }
>
> rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
@@ -113,6 +113,8 @@ rte_eth_dev_allocate(const char *name)
eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
eth_dev->data->mtu = RTE_ETHER_MTU;
pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
+ RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ eth_dev_shared_data->allocated_count++;
unlock:
rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
@@ -253,6 +255,10 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->dev_private);
pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
+
+ eth_dev_shared_data->allocated_count--;
+ if (eth_dev_shared_data->allocated_count == 0)
+ eth_dev_shared_data_release();
}
rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
@@ -11,6 +11,7 @@
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
+static const struct rte_memzone *eth_dev_shared_mz;
struct eth_dev_shared *eth_dev_shared_data;
/* spinlock for eth device callbacks */
@@ -324,7 +325,7 @@ eth_dev_shared_data_prepare(void)
const unsigned int flags = 0;
const struct rte_memzone *mz;
- if (eth_dev_shared_data == NULL) {
+ if (eth_dev_shared_mz == NULL) {
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* Allocate port data and ownership shared memory. */
mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
@@ -335,16 +336,29 @@ eth_dev_shared_data_prepare(void)
if (mz == NULL)
rte_panic("Cannot allocate ethdev shared data\n");
+ eth_dev_shared_mz = mz;
eth_dev_shared_data = mz->addr;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
eth_dev_shared_data->next_owner_id =
RTE_ETH_DEV_NO_OWNER + 1;
+ eth_dev_shared_data->allocated_count = 0;
memset(eth_dev_shared_data->data, 0,
sizeof(eth_dev_shared_data->data));
}
}
}
+void
+eth_dev_shared_data_release(void)
+{
+ RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ if (eth_dev_shared_mz != NULL) {
+ rte_memzone_free(eth_dev_shared_mz);
+ eth_dev_shared_mz = NULL;
+ eth_dev_shared_data = NULL;
+ }
+}
+
void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
@@ -15,6 +15,7 @@
struct eth_dev_shared {
uint64_t next_owner_id;
+ uint64_t allocated_count;
struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
};
@@ -69,6 +70,8 @@ void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
void eth_dev_shared_data_prepare(void)
__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock());
+void eth_dev_shared_data_release(void)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock());
void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid);