[dpdk-dev,v2] mempool: limit cache_size

Message ID 1431963314-3701-1-git-send-email-zoltan.kiss@linaro.org (mailing list archive)
State Accepted, archived

Commit Message

Zoltan Kiss May 18, 2015, 3:35 p.m. UTC
  Otherwise cache_flushthresh can be bigger than n, and
a consumer can starve others by keeping every element
either in use or in the cache.

Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org>
---
v2: use macro for calculation, with proper casting

 lib/librte_mempool/rte_mempool.c | 8 +++++---
 lib/librte_mempool/rte_mempool.h | 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)
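
To make the failure mode concrete, here is a minimal standalone sketch (the numbers are illustrative, not taken from the thread): with n = 128 elements and cache_size = 100, the flush threshold is (uint32_t)(100 * 1.5) = 150. A per-lcore cache is only flushed back to the common pool once its length reaches the threshold, so a threshold above n means one lcore can end up holding every element in its cache and never flush. It uses the same macro as the patch below (typeof is a GNU C extension, so build with gcc):

#include <stdio.h>
#include <stdint.h>

/* same definitions as in the patch */
#define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
#define CALC_CACHE_FLUSHTHRESH(c) \
	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))

int main(void)
{
	unsigned n = 128;          /* illustrative pool size */
	uint32_t cache_size = 100; /* illustrative per-lcore cache size */
	uint32_t thresh = CALC_CACHE_FLUSHTHRESH(cache_size);

	/* thresh = 150 > n = 128: the cache can never reach the flush
	 * threshold, so the patched check rejects this combination. */
	printf("flushthresh=%u n=%u -> %s\n", thresh, n,
	       thresh > n ? "rejected with EINVAL" : "accepted");
	return 0;
}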
  

Comments

Ananyev, Konstantin May 18, 2015, 3:51 p.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zoltan Kiss
> Sent: Monday, May 18, 2015 4:35 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2] mempool: limit cache_size
> 
> Otherwise cache_flushthresh can be bigger than n, and
> a consumer can starve others by keeping every element
> either in use or in the cache.
> 
> Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org>

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

  
Olivier Matz May 20, 2015, 8:32 a.m. UTC | #2
On 05/18/2015 05:35 PM, Zoltan Kiss wrote:
> Otherwise cache_flushthresh can be bigger than n, and
> a consumer can starve others by keeping every element
> either in use or in the cache.
>
> Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org>

Acked-by: Olivier Matz <olivier.matz@6wind.com>


  
Thomas Monjalon May 20, 2015, 8:46 a.m. UTC | #3
2015-05-20 10:32, Olivier MATZ:
> On 05/18/2015 05:35 PM, Zoltan Kiss wrote:
> > Otherwise cache_flushthresh can be bigger than n, and
> > a consumer can starve others by keeping every element
> > either in use or in the cache.
> >
> > Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org>
> 
> Acked-by: Olivier Matz <olivier.matz@6wind.com>

Applied, thanks

> > +#define CALC_CACHE_FLUSHTHRESH(c)	\
> > +	((typeof (c))((c) *  CACHE_FLUSHTHRESH_MULTIPLIER))

I fixed spacing here:
+	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
  

Patch

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index cf7ed76..5cfb96b 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -68,6 +68,8 @@ static struct rte_tailq_elem rte_mempool_tailq = {
 EAL_REGISTER_TAILQ(rte_mempool_tailq)
 
 #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
+#define CALC_CACHE_FLUSHTHRESH(c)	\
+	((typeof (c))((c) *  CACHE_FLUSHTHRESH_MULTIPLIER))
 
 /*
  * return the greatest common divisor between a and b (fast algorithm)
@@ -440,7 +442,8 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
 
 	/* asked cache too big */
-	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+	    CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
@@ -565,8 +568,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	mp->header_size = objsz.header_size;
 	mp->trailer_size = objsz.trailer_size;
 	mp->cache_size = cache_size;
-	mp->cache_flushthresh = (uint32_t)
-		(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
+	mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
 	mp->private_data_size = private_data_size;
 
 	/* calculate address of the first element for continuous mempool. */
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 9001312..a4a9610 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -468,7 +468,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   If cache_size is non-zero, the rte_mempool library will try to
  *   limit the accesses to the common lockless pool, by maintaining a
  *   per-lcore object cache. This argument must be lower or equal to
- *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose
  *   cache_size to have "n modulo cache_size == 0": if this is
  *   not the case, some elements will always stay in the pool and will
  *   never be used. The access to the per-lcore table is of course
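
For reference, a hedged sketch of a call that satisfies both constraints after this patch (the pool name and all sizes are illustrative; in this DPDK version rte_mempool_create() forwards its arguments to rte_mempool_xmem_create(), where the new check is applied):

#include <rte_mempool.h>
#include <rte_lcore.h> /* rte_socket_id() */

static struct rte_mempool *
create_example_pool(void)
{
	/* cache_size = 256: within RTE_MEMPOOL_CACHE_MAX_SIZE, and
	 * 256 * 1.5 = 384 <= n = 8192. 8192 % 256 == 0 also follows the
	 * "n modulo cache_size == 0" advice above. */
	return rte_mempool_create("example_pool",
			8192,            /* n: number of elements */
			2048,            /* elt_size in bytes */
			256,             /* cache_size */
			0,               /* private_data_size */
			NULL, NULL,      /* mp_init, mp_init_arg */
			NULL, NULL,      /* obj_init, obj_init_arg */
			rte_socket_id(), /* socket_id */
			0);              /* flags */
}

Conversely, a value such as cache_size = 400 for a 500-element pool now fails: it is below RTE_MEMPOOL_CACHE_MAX_SIZE, but 400 * 1.5 = 600 > 500, so creation returns NULL with rte_errno set to EINVAL.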