@@ -317,9 +317,9 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
*/
static int
mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
- struct rte_mempool *mp)
+ struct rte_mempool *mp, bool is_extmem)
{
- return mlx5_mr_mempool_register(cdev, mp);
+ return mlx5_mr_mempool_register(cdev, mp, is_extmem);
}
/**
@@ -353,7 +353,7 @@ mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
struct mlx5_common_device *cdev = arg;
int ret;
- ret = mlx5_dev_mempool_register(cdev, mp);
+ ret = mlx5_dev_mempool_register(cdev, mp, false);
if (ret < 0 && rte_errno != EEXIST)
DRV_LOG(ERR,
"Failed to register existing mempool %s for PD %p: %s",
@@ -390,13 +390,10 @@ mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
void *arg)
{
struct mlx5_common_device *cdev = arg;
- bool extmem = mlx5_mempool_is_extmem(mp);
switch (event) {
case RTE_MEMPOOL_EVENT_READY:
- if (extmem)
- break;
- if (mlx5_dev_mempool_register(cdev, mp) < 0)
+ if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
DRV_LOG(ERR,
"Failed to register new mempool %s for PD %p: %s",
mp->name, cdev->pd, rte_strerror(rte_errno));
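/*
 * Usage sketch, not part of the patch: with the extmem short-circuit
 * removed, the callback above now registers every pool that becomes
 * ready with is_extmem == false; pools of pinned external buffers are
 * re-registered with is_extmem == true later, at Rx queue setup or on
 * the first datapath lookup miss (see the mlx5_mr_mb2mr_bh() hunk
 * below). The callback itself is assumed to be subscribed through the
 * generic mempool event API:
 */
rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb, cdev);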
@@ -65,7 +65,8 @@ mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr)
*/
int
mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
- struct rte_mempool *mempool, bool reg)
+ struct rte_mempool *mempool, bool reg,
+ bool is_extmem)
{
struct rte_mp_msg mp_req;
struct rte_mp_msg *mp_res;
@@ -82,6 +83,7 @@ mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
MLX5_MP_REQ_MEMPOOL_UNREGISTER;
mp_init_port_agnostic_msg(&mp_req, type);
arg->mempool = mempool;
+ arg->is_extmem = is_extmem;
arg->cdev = cdev;
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
@@ -37,9 +37,12 @@ struct mlx5_mp_arg_queue_id {
struct mlx5_mp_arg_mr_manage {
struct mlx5_common_device *cdev;
+ RTE_STD_C11
union {
- struct rte_mempool *mempool;
- /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */
+ struct {
+ struct rte_mempool *mempool;
+ bool is_extmem;
+ }; /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */
uintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */
};
};
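/*
 * Illustration only, not from the patch: the RTE_STD_C11 anonymous
 * struct keeps existing arg->mempool accesses compiling unchanged while
 * letting (un)register requests carry the new flag in the same union.
 * cdev, mp and va are placeholders.
 */
struct mlx5_mp_arg_mr_manage arg;

arg.cdev = cdev;
arg.mempool = mp;
arg.is_extmem = true;        /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */
/* or: arg.addr = (uintptr_t)va;  MLX5_MP_REQ_CREATE_MR */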
@@ -134,7 +137,8 @@ __rte_internal
int mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr);
__rte_internal
int mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,
- struct rte_mempool *mempool, bool reg);
+ struct rte_mempool *mempool, bool reg,
+ bool is_extmem);
__rte_internal
int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,
struct mlx5_mp_arg_queue_state_modify *sm);
@@ -47,6 +47,8 @@ struct mlx5_mempool_reg {
struct mlx5_mempool_mr *mrs;
/** Number of memory regions. */
unsigned int mrs_n;
+ /** Whether the MRs were created for external pinned memory. */
+ bool is_extmem;
};
void
@@ -1403,6 +1405,8 @@ mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
*
* @param[in] mp
* Analyzed mempool.
+ * @param[in] is_extmem
+ * Whether the pool contains only external pinned buffers.
* @param[out] out
* Receives the ranges, caller must release it with free().
* @param[out] out_n
@@ -1412,17 +1416,16 @@ mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,
* 0 on success, (-1) on failure.
*/
static int
-mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
- unsigned int *out_n)
+mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n)
{
struct mlx5_range *chunks;
unsigned int chunks_n, contig_n, i;
int ret;
/* Collect the pool underlying memory. */
- ret = mlx5_mempool_is_extmem(mp) ?
- mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
- mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
+ ret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :
+ mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);
if (ret < 0)
return ret;
/* Merge adjacent chunks and place them at the beginning. */
@@ -1446,6 +1449,8 @@ mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
*
* @param[in] mp
* Mempool to analyze.
+ * @param[in] is_extmem
+ * Whether the pool contains only external pinned buffers.
* @param[out] out
* Receives memory ranges to register, aligned to the system page size.
* The caller must release them with free().
@@ -1458,14 +1463,15 @@ mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,
* 0 on success, (-1) on failure.
*/
static int
-mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
- unsigned int *out_n, bool *share_hugepage)
+mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,
+ struct mlx5_range **out, unsigned int *out_n,
+ bool *share_hugepage)
{
struct mlx5_range *ranges = NULL;
unsigned int i, ranges_n = 0;
struct rte_memseg_list *msl;
- if (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {
+ if (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {
DRV_LOG(ERR, "Cannot get address ranges for mempool %s",
mp->name);
return -1;
@@ -1507,7 +1513,8 @@ mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,
/** Create a registration object for the mempool. */
static struct mlx5_mempool_reg *
-mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
+ bool is_extmem)
{
struct mlx5_mempool_reg *mpr = NULL;
@@ -1522,6 +1529,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)
mpr->mp = mp;
mpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);
mpr->mrs_n = mrs_n;
+ mpr->is_extmem = is_extmem;
return mpr;
}
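/*
 * For context, a sketch of the allocation performed just above this
 * hunk (elided from the diff): header and MR array share one block, so
 * mpr->mrs can point at (mpr + 1). The exact flags are an assumption.
 */
mpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
		  sizeof(*mpr) + mrs_n * sizeof(struct mlx5_mempool_mr),
		  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);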
@@ -1586,31 +1594,32 @@ mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)
static int
mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
- void *pd, struct rte_mempool *mp)
+ void *pd, struct rte_mempool *mp,
+ bool is_extmem)
{
struct mlx5_range *ranges = NULL;
- struct mlx5_mempool_reg *mpr, *new_mpr;
+ struct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;
unsigned int i, ranges_n;
- bool share_hugepage;
+ bool share_hugepage, standalone = false;
int ret = -1;
/* Early check to avoid unnecessary creation of MRs. */
rte_rwlock_read_lock(&share_cache->rwlock);
- mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ old_mpr = mlx5_mempool_reg_lookup(share_cache, mp);
rte_rwlock_read_unlock(&share_cache->rwlock);
- if (mpr != NULL) {
+ if (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {
DRV_LOG(DEBUG, "Mempool %s is already registered for PD %p",
mp->name, pd);
rte_errno = EEXIST;
goto exit;
}
- if (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,
+ if (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,
&share_hugepage) < 0) {
DRV_LOG(ERR, "Cannot get mempool %s memory ranges", mp->name);
rte_errno = ENOMEM;
goto exit;
}
- new_mpr = mlx5_mempool_reg_create(mp, ranges_n);
+ new_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);
if (new_mpr == NULL) {
DRV_LOG(ERR,
"Cannot create a registration object for mempool %s in PD %p",
@@ -1670,6 +1679,12 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
/* Concurrent registration is not supposed to happen. */
rte_rwlock_write_lock(&share_cache->rwlock);
mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ if (mpr == old_mpr && old_mpr != NULL) {
+ LIST_REMOVE(old_mpr, next);
+ standalone = mlx5_mempool_reg_detach(mpr);
+ /* No need to flush the cache: old MRs cannot be in use. */
+ mpr = NULL;
+ }
if (mpr == NULL) {
mlx5_mempool_reg_attach(new_mpr);
LIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);
@@ -1682,6 +1697,10 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
mlx5_mempool_reg_destroy(share_cache, new_mpr, true);
rte_errno = EEXIST;
goto exit;
+ } else if (old_mpr != NULL) {
+ DRV_LOG(DEBUG, "Mempool %s registration for PD %p updated for external memory",
+ mp->name, pd);
+ mlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);
}
exit:
free(ranges);
@@ -1690,9 +1709,9 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,
static int
mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
- struct rte_mempool *mp)
+ struct rte_mempool *mp, bool is_extmem)
{
- return mlx5_mp_req_mempool_reg(cdev, mp, true);
+ return mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);
}
/**
@@ -1708,16 +1727,17 @@ mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,
*/
int
mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
- struct rte_mempool *mp)
+ struct rte_mempool *mp, bool is_extmem)
{
if (mp->flags & RTE_MEMPOOL_F_NON_IO)
return 0;
switch (rte_eal_process_type()) {
case RTE_PROC_PRIMARY:
return mlx5_mr_mempool_register_primary(&cdev->mr_scache,
- cdev->pd, mp);
+ cdev->pd, mp,
+ is_extmem);
case RTE_PROC_SECONDARY:
- return mlx5_mr_mempool_register_secondary(cdev, mp);
+ return mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);
default:
return -1;
}
@@ -1756,7 +1776,7 @@ static int
mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,
struct rte_mempool *mp)
{
- return mlx5_mp_req_mempool_reg(cdev, mp, false);
+ return mlx5_mp_req_mempool_reg(cdev, mp, false, false /* is_extmem */);
}
/**
@@ -1868,6 +1888,65 @@ mlx5_lookup_mempool_regs(struct mlx5_mr_ctrl *mr_ctrl,
return lkey;
}
+/**
+ * Populate the cache with LKeys of all MRs used by the mempool.
+ * It is intended for Rx mempools, which are registered in advance.
+ *
+ * @param mr_ctrl
+ * Per-queue MR control handle.
+ * @param mp
+ * Registered memory pool.
+ *
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mlx5_mr_share_cache *share_cache =
+ container_of(mr_ctrl->dev_gen_ptr, struct mlx5_mr_share_cache,
+ dev_gen);
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ struct mlx5_mempool_reg *mpr;
+ unsigned int i;
+
+ /*
+ * Registration is valid after the lock is released,
+ * because the function is called after the mempool is registered.
+ */
+ rte_rwlock_read_lock(&share_cache->rwlock);
+ mpr = mlx5_mempool_reg_lookup(share_cache, mp);
+ rte_rwlock_read_unlock(&share_cache->rwlock);
+ if (mpr == NULL) {
+ DRV_LOG(ERR, "Mempool %s is not registered", mp->name);
+ rte_errno = ENOENT;
+ return -1;
+ }
+ for (i = 0; i < mpr->mrs_n; i++) {
+ struct mlx5_mempool_mr *mr = &mpr->mrs[i];
+ struct mr_cache_entry entry;
+ uint32_t lkey;
+ uint16_t idx;
+
+ lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr);
+ if (lkey != UINT32_MAX)
+ continue;
+ if (bt->len == bt->size)
+ mr_btree_expand(bt, bt->size << 1);
+ entry.start = (uintptr_t)mr->pmd_mr.addr;
+ entry.end = entry.start + mr->pmd_mr.len;
+ entry.lkey = rte_cpu_to_be_32(mr->pmd_mr.lkey);
+ if (mr_btree_insert(bt, &entry) < 0) {
+ DRV_LOG(ERR, "Cannot insert cache entry for mempool %s MR %08x",
+ mp->name, entry.lkey);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ }
+ return 0;
+}
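/*
 * Usage sketch (mirrored by the Rx queue hunk at the end of this patch):
 * once a mempool is registered, a queue pre-loads its per-queue B-tree
 * so first-packet lookups hit the local cache instead of taking the
 * shared-cache lock. rxq is a placeholder for the queue control struct.
 */
if (mlx5_mr_mempool_populate_cache(&rxq->mr_ctrl, mp) < 0)
	return -rte_errno;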
+
/**
* Bottom-half lookup for the address from the mempool.
*
@@ -1909,6 +1988,8 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl,
uint32_t
mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
{
+ struct rte_mempool *mp;
+ struct mlx5_mprq_buf *buf;
uint32_t lkey;
uintptr_t addr = (uintptr_t)mb->buf_addr;
struct mlx5_mr_share_cache *share_cache =
@@ -1917,27 +1998,26 @@ mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)
struct mlx5_common_device *cdev =
container_of(share_cache, struct mlx5_common_device, mr_scache);
- if (cdev->config.mr_mempool_reg_en) {
- struct rte_mempool *mp = NULL;
- struct mlx5_mprq_buf *buf;
-
- if (!RTE_MBUF_HAS_EXTBUF(mb)) {
- mp = mlx5_mb2mp(mb);
- } else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
- /* Recover MPRQ mempool. */
- buf = mb->shinfo->fcb_opaque;
- mp = buf->mp;
- }
- if (mp != NULL) {
- lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
- /*
- * Lookup can only fail on invalid input, e.g. "addr"
- * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
- */
- if (lkey != UINT32_MAX)
- return lkey;
- }
- /* Fallback for generic mechanism in corner cases. */
+ /* Recover MPRQ mempool. */
+ if (RTE_MBUF_HAS_EXTBUF(mb) &&
+ mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+ buf = mb->shinfo->fcb_opaque;
+ mp = buf->mp;
+ } else {
+ mp = mlx5_mb2mp(mb);
+ }
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ if (lkey != UINT32_MAX)
+ return lkey;
+ /* Register pinned external memory if the mempool is not used for Rx. */
+ if (cdev->config.mr_mempool_reg_en &&
+ (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)) {
+ if (mlx5_mr_mempool_register(cdev, mp, true) < 0)
+ return UINT32_MAX;
+ lkey = mlx5_mr_mempool2mr_bh(mr_ctrl, mp, addr);
+ MLX5_ASSERT(lkey != UINT32_MAX);
+ return lkey;
}
+ /* Fallback to generic mechanism in corner cases. */
return mlx5_mr_addr2mr_bh(mr_ctrl, addr);
}
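/*
 * Application-side sketch (an assumption, not part of this patch) of a
 * pool that takes the new on-demand branch above: a pool created with
 * pinned external buffers carries RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF in
 * its private flags, so the first Tx lookup miss registers it with
 * is_extmem == true. ext_va, ext_iova and ext_len are placeholders for
 * externally managed memory.
 */
struct rte_pktmbuf_extmem ext = {
	.buf_ptr = ext_va,
	.buf_iova = ext_iova,
	.buf_len = ext_len,
	.elt_size = 2048,
};
struct rte_mempool *mp = rte_pktmbuf_pool_create_extbuf(
	"extmem_pool", 4096, 256, 0, 2048, SOCKET_ID_ANY, &ext, 1);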
@@ -255,20 +255,15 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
__rte_internal
int
mlx5_mr_mempool_register(struct mlx5_common_device *cdev,
- struct rte_mempool *mp);
+ struct rte_mempool *mp, bool is_extmem);
__rte_internal
int
mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,
struct rte_mempool *mp);
-/** Check if @p mp has buffers pinned in external memory. */
-static inline bool
-mlx5_mempool_is_extmem(struct rte_mempool *mp)
-{
- return (mp->private_data_size ==
- sizeof(struct rte_pktmbuf_pool_private)) &&
- (mp->elt_size >= sizeof(struct rte_mbuf)) &&
- (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF);
-}
+__rte_internal
+int
+mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
#endif /* RTE_PMD_MLX5_COMMON_MR_H_ */
@@ -145,4 +145,5 @@ INTERNAL {
mlx5_mr_mempool_unregister;
mlx5_mp_req_mempool_reg;
mlx5_mr_mempool2mr_bh;
+ mlx5_mr_mempool_populate_cache;
};
@@ -48,7 +48,8 @@ mlx5_mp_os_handle_port_agnostic(const struct rte_mp_msg *mp_msg,
return rte_mp_reply(&mp_res, peer);
case MLX5_MP_REQ_MEMPOOL_REGISTER:
mp_init_port_agnostic_msg(&mp_res, param->type);
- res->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool);
+ res->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool,
+ mng->is_extmem);
return rte_mp_reply(&mp_res, peer);
case MLX5_MP_REQ_MEMPOOL_UNREGISTER:
mp_init_port_agnostic_msg(&mp_res, param->type);
@@ -1458,7 +1458,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(priv->sh->cdev, mp);
+ ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
@@ -105,21 +105,6 @@ mlx5_txq_start(struct rte_eth_dev *dev)
return -rte_errno;
}
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned int idx)
-{
- struct mlx5_rxq_data *rxq = opaque;
-
- RTE_SET_USED(mp);
- RTE_SET_USED(idx);
- mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
/**
* Register Rx queue mempools and fill the Rx queue cache.
* This function tolerates repeated mempool registration.
@@ -139,24 +124,23 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
/* MPRQ mempool is registered on creation, just fill the cache. */
- if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
- rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
- mlx5_rxq_mempool_register_cb,
- &rxq_ctrl->rxq);
- return 0;
- }
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+ rxq_ctrl->rxq.mprq_mp);
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
- uint32_t flags;
+ bool is_extmem;
mp = rxq_ctrl->rxq.rxseg[s].mp;
- flags = mp != rxq_ctrl->rxq.mprq_mp ?
- rte_pktmbuf_priv_flags(mp) : 0;
- ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp);
+ is_extmem = (rte_pktmbuf_priv_flags(mp) &
+ RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+ ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+ is_extmem);
if (ret < 0 && rte_errno != EEXIST)
return ret;
- if ((flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0)
- rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
- &rxq_ctrl->rxq);
+ ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+ mp);
+ if (ret < 0)
+ return ret;
}
return 0;
}