event/dlb2: add support for single 512B write of 4 QEs

Message ID 20220409151849.1007602-1-timothy.mcdaniel@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series event/dlb2: add support for single 512B write of 4 QEs

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues
ci/github-robot: build success github build: passed
ci/intel-Testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS

Commit Message

Timothy McDaniel April 9, 2022, 3:18 p.m. UTC
On Xeon, where 512b accesses are available, the movdir64b instruction can
perform a 512b read and write to the DLB producer port. For movdir64b to
be able to pull its data from the store buffers (store-buffer forwarding)
before the actual write, the data should be laid out as a single 512b
write. When the code is built for Xeon with 512b AVX support, this commit
switches to a single 512b write of all 4 QEs instead of 4x64b writes.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/dlb2.c | 86 ++++++++++++++++++++++++++++++---------
 1 file changed, 67 insertions(+), 19 deletions(-)
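
[A minimal standalone sketch of the idea; the struct, field, and helper
names below are illustrative only, not the driver's own (the actual PMD
stages the QEs in its port scratch space and uses its own movdir64b
helper). Build all four 16B QEs in one 512b register, store them to the
64B staging line with a single 512b store, and let MOVDIR64B push that
line to the producer port, so its read of the staging line can be
satisfied by store-buffer forwarding.]

#include <stdint.h>
#include <immintrin.h>

/* Illustrative 16B QE layout; the real DLB2 HCW layout differs. */
struct qe16 {
	uint64_t data;
	uint64_t metadata;
};

/* Build with AVX512F and MOVDIR64B enabled, e.g. gcc -mavx512f -mmovdir64b */
static inline void
send_four_qes(void *pp_mmio, const struct qe16 ev[4], struct qe16 *scratch)
{
	/* One 512b store covers all four QEs, so the MOVDIR64B read of the
	 * 64B-aligned scratch line below can be forwarded from the store
	 * buffer instead of waiting for four separate 64b stores to drain.
	 */
	__m512i all_qes = _mm512_set_epi64(
		(long long)ev[3].metadata, (long long)ev[3].data,
		(long long)ev[2].metadata, (long long)ev[2].data,
		(long long)ev[1].metadata, (long long)ev[1].data,
		(long long)ev[0].metadata, (long long)ev[0].data);
	_mm512_store_si512((void *)scratch, all_qes);

	/* Single 64B direct store to the producer port MMIO window. */
	_movdir64b(pp_mmio, scratch);
}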
  

Comments

Jerin Jacob May 14, 2022, 12:07 p.m. UTC | #1
On Sat, Apr 9, 2022 at 8:48 PM Timothy McDaniel
<timothy.mcdaniel@intel.com> wrote:
>
> On Xeon, as 512b accesses are available, movdir64 instruction is able to
> perform 512b read and write to DLB producer port. In order for movdir64
> to be able to pull its data from store buffers (store-buffer-forwarding)
> (before actual write), data should be in single 512b write format.
> This commit add change when code is built for Xeon with 512b AVX support
> to make single 512b write of all 4 QEs instead of 4x64b writes.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> ---
>  drivers/event/dlb2/dlb2.c | 86 ++++++++++++++++++++++++++++++---------
>  1 file changed, 67 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> index 36f07d0061..e2a5303310 100644
> --- a/drivers/event/dlb2/dlb2.c
> +++ b/drivers/event/dlb2/dlb2.c
> @@ -2776,25 +2776,73 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port,
>                                                 ev[3].event_type,
>                                              DLB2_QE_EV_TYPE_WORD + 4);
>
> -               /* Store the metadata to memory (use the double-precision
> -                * _mm_storeh_pd because there is no integer function for
> -                * storing the upper 64b):
> -                * qe[0] metadata = sse_qe[0][63:0]
> -                * qe[1] metadata = sse_qe[0][127:64]
> -                * qe[2] metadata = sse_qe[1][63:0]
> -                * qe[3] metadata = sse_qe[1][127:64]
> -                */
> -               _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
> -               _mm_storeh_pd((double *)&qe[1].u.opaque_data,
> -                             (__m128d)sse_qe[0]);
> -               _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
> -               _mm_storeh_pd((double *)&qe[3].u.opaque_data,
> -                             (__m128d)sse_qe[1]);
> -
> -               qe[0].data = ev[0].u64;
> -               qe[1].data = ev[1].u64;
> -               qe[2].data = ev[2].u64;
> -               qe[3].data = ev[3].u64;
> + #ifdef __AVX512VL__

+ x86 maintainers

We need a runtime check based on CPU flags. Right? As the build and
run machine can be different?

> +
> +                       /*
> +                        * 1) Build avx512 QE store and build each
> +                        *    QE individually as XMM register
> +                        * 2) Merge the 4 XMM registers/QEs into single AVX512
> +                        *    register
> +                        * 3) Store single avx512 register to &qe[0] (4x QEs
> +                        *    stored in 1x store)
> +                        */
> +
> +                       __m128i v_qe0 = _mm_setzero_si128();
> +                       uint64_t meta = _mm_extract_epi64(sse_qe[0], 0);
> +                       v_qe0 = _mm_insert_epi64(v_qe0, ev[0].u64, 0);
> +                       v_qe0 = _mm_insert_epi64(v_qe0, meta, 1);
> +
> +                       __m128i v_qe1 = _mm_setzero_si128();
> +                       meta = _mm_extract_epi64(sse_qe[0], 1);
> +                       v_qe1 = _mm_insert_epi64(v_qe1, ev[1].u64, 0);
> +                       v_qe1 = _mm_insert_epi64(v_qe1, meta, 1);
> +
> +                       __m128i v_qe2 = _mm_setzero_si128();
> +                       meta = _mm_extract_epi64(sse_qe[1], 0);
> +                       v_qe2 = _mm_insert_epi64(v_qe2, ev[2].u64, 0);
> +                       v_qe2 = _mm_insert_epi64(v_qe2, meta, 1);
> +
> +                       __m128i v_qe3 = _mm_setzero_si128();
> +                       meta = _mm_extract_epi64(sse_qe[1], 1);
> +                       v_qe3 = _mm_insert_epi64(v_qe3, ev[3].u64, 0);
> +                       v_qe3 = _mm_insert_epi64(v_qe3, meta, 1);
> +
> +                       /* we have 4x XMM registers, one per QE. */
> +                       __m512i v_all_qes = _mm512_setzero_si512();
> +                       v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe0, 0);
> +                       v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe1, 1);
> +                       v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe2, 2);
> +                       v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe3, 3);
> +
> +                       /*
> +                        * store the 4x QEs in a single register to the scratch
> +                        * space of the PMD
> +                        */
> +                       _mm512_store_si512(&qe[0], v_all_qes);
> +#else
> +                       /*
> +                        * Store the metadata to memory (use the double-precision
> +                        * _mm_storeh_pd because there is no integer function for
> +                        * storing the upper 64b):
> +                        * qe[0] metadata = sse_qe[0][63:0]
> +                        * qe[1] metadata = sse_qe[0][127:64]
> +                        * qe[2] metadata = sse_qe[1][63:0]
> +                        * qe[3] metadata = sse_qe[1][127:64]
> +                        */
> +                       _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data,
> +                                        sse_qe[0]);
> +                       _mm_storeh_pd((double *)&qe[1].u.opaque_data,
> +                                     (__m128d)sse_qe[0]);
> +                       _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data,
> +                                        sse_qe[1]);
> +                       _mm_storeh_pd((double *)&qe[3].u.opaque_data,
> +                                     (__m128d)sse_qe[1]);
> +
> +                       qe[0].data = ev[0].u64;
> +                       qe[1].data = ev[1].u64;
> +                       qe[2].data = ev[2].u64;
> +                       qe[3].data = ev[3].u64;
> +#endif
>
>                 break;
>         case 3:
> --
> 2.25.1
>
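
[A minimal sketch of the kind of run-time check being asked for here, using
DPDK's CPU-flag and max-SIMD-bitwidth helpers; the CC_AVX512_SUPPORT
build-time guard name is an assumption borrowed from other Intel PMDs, not
something this patch defines.]

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

static bool
dlb2_use_avx512_path(void)
{
#ifdef CC_AVX512_SUPPORT	/* hypothetical per-driver build guard */
	/* Decide at run time: the build machine and the run machine can
	 * differ, so do not rely on __AVX512VL__ alone.
	 */
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL) &&
	       rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512;
#else
	return false;
#endif
}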
  
Bruce Richardson May 16, 2022, 8:42 a.m. UTC | #2
On Sat, May 14, 2022 at 05:37:39PM +0530, Jerin Jacob wrote:
> On Sat, Apr 9, 2022 at 8:48 PM Timothy McDaniel
> <timothy.mcdaniel@intel.com> wrote:
> >
> > On Xeon, as 512b accesses are available, movdir64 instruction is able
> > to perform 512b read and write to DLB producer port. In order for
> > movdir64 to be able to pull its data from store buffers
> > (store-buffer-forwarding) (before actual write), data should be in
> > single 512b write format.  This commit add change when code is built
> > for Xeon with 512b AVX support to make single 512b write of all 4 QEs
> > instead of 4x64b writes.
> >
> > Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> > ---
> >  drivers/event/dlb2/dlb2.c | 86 ++++++++++++++++++++++++++++++---------
> >  1 file changed, 67 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> > index 36f07d0061..e2a5303310 100644
> > --- a/drivers/event/dlb2/dlb2.c
> > +++ b/drivers/event/dlb2/dlb2.c
> > @@ -2776,25 +2776,73 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port,
> >                                                 ev[3].event_type,
> >                                              DLB2_QE_EV_TYPE_WORD + 4);
> >
> > -               /* Store the metadata to memory (use the double-precision
> > -                * _mm_storeh_pd because there is no integer function for
> > -                * storing the upper 64b):
> > -                * qe[0] metadata = sse_qe[0][63:0]
> > -                * qe[1] metadata = sse_qe[0][127:64]
> > -                * qe[2] metadata = sse_qe[1][63:0]
> > -                * qe[3] metadata = sse_qe[1][127:64]
> > -                */
> > -               _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
> > -               _mm_storeh_pd((double *)&qe[1].u.opaque_data,
> > -                             (__m128d)sse_qe[0]);
> > -               _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
> > -               _mm_storeh_pd((double *)&qe[3].u.opaque_data,
> > -                             (__m128d)sse_qe[1]);
> > -
> > -               qe[0].data = ev[0].u64;
> > -               qe[1].data = ev[1].u64;
> > -               qe[2].data = ev[2].u64;
> > -               qe[3].data = ev[3].u64;
> > + #ifdef __AVX512VL__
> 
> + x86 maintainers
> 
> We need a runtime check based on CPU flags. Right? As the build and run
> machine can be different?
> 
Ideally, yes, this should be a run-time decision. There are quite a number
of examples of this in DPDK. However, most uses of runtime decisions are in
functions called via function pointer, so not sure if those schemes apply
here. It's certainly worth investigating, though.

/Bruce
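
[A sketch of the function-pointer scheme mentioned above, with hypothetical
names; the real driver would more likely store the selected routine, or a
cached flag, in its port structure during setup.]

#include <rte_cpuflags.h>
#include <stdint.h>

typedef void (*qe_store_fn)(void *qe_mem, const uint64_t data[4],
			    const uint64_t meta[4]);

static void
qe_store_scalar(void *qe_mem, const uint64_t data[4], const uint64_t meta[4])
{
	/* existing path: four 64b data stores plus the metadata stores */
	(void)qe_mem; (void)data; (void)meta;
}

static void
qe_store_avx512(void *qe_mem, const uint64_t data[4], const uint64_t meta[4])
{
	/* new path: merge into one __m512i and issue a single 512b store */
	(void)qe_mem; (void)data; (void)meta;
}

/* Resolved once, e.g. from the PMD's probe or port-setup path. */
static qe_store_fn qe_store = qe_store_scalar;

static void
qe_store_select(void)
{
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL))
		qe_store = qe_store_avx512;
}

The per-burst enqueue path then calls qe_store() unconditionally, so the
CPU-flag test happens once at setup rather than on every burst.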
  
Timothy McDaniel May 16, 2022, 5 p.m. UTC | #3
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Saturday, May 14, 2022 7:08 AM
> To: McDaniel, Timothy <timothy.mcdaniel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; konstantin.v.ananyev@yandex.ru
> Cc: Jerin Jacob <jerinj@marvell.com>; dpdk-dev <dev@dpdk.org>
> Subject: Re: [PATCH] event/dlb2: add support for single 512B write of 4 QEs
> 
> On Sat, Apr 9, 2022 at 8:48 PM Timothy McDaniel
> <timothy.mcdaniel@intel.com> wrote:
> >
> > On Xeon, as 512b accesses are available, movdir64 instruction is able to
> > perform 512b read and write to DLB producer port. In order for movdir64
> > to be able to pull its data from store buffers (store-buffer-forwarding)
> > (before actual write), data should be in single 512b write format.
> > This commit add change when code is built for Xeon with 512b AVX support
> > to make single 512b write of all 4 QEs instead of 4x64b writes.
> >
> > Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> > ---
> >  drivers/event/dlb2/dlb2.c | 86 ++++++++++++++++++++++++++++++---------
> >  1 file changed, 67 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> > index 36f07d0061..e2a5303310 100644
> > --- a/drivers/event/dlb2/dlb2.c
> > +++ b/drivers/event/dlb2/dlb2.c
> > @@ -2776,25 +2776,73 @@ dlb2_event_build_hcws(struct dlb2_port
> *qm_port,
> >                                                 ev[3].event_type,
> >                                              DLB2_QE_EV_TYPE_WORD + 4);
> >
> > -               /* Store the metadata to memory (use the double-precision
> > -                * _mm_storeh_pd because there is no integer function for
> > -                * storing the upper 64b):
> > -                * qe[0] metadata = sse_qe[0][63:0]
> > -                * qe[1] metadata = sse_qe[0][127:64]
> > -                * qe[2] metadata = sse_qe[1][63:0]
> > -                * qe[3] metadata = sse_qe[1][127:64]
> > -                */
> > -               _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
> > -               _mm_storeh_pd((double *)&qe[1].u.opaque_data,
> > -                             (__m128d)sse_qe[0]);
> > -               _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
> > -               _mm_storeh_pd((double *)&qe[3].u.opaque_data,
> > -                             (__m128d)sse_qe[1]);
> > -
> > -               qe[0].data = ev[0].u64;
> > -               qe[1].data = ev[1].u64;
> > -               qe[2].data = ev[2].u64;
> > -               qe[3].data = ev[3].u64;
> > + #ifdef __AVX512VL__
> 
> + x86 maintainers
> 
> We need a runtime check based on CPU flags. Right? As the build and
> run machine can be different?

Thanks Jerin. I will convert to a runtime check.
  

Patch

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 36f07d0061..e2a5303310 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -2776,25 +2776,73 @@  dlb2_event_build_hcws(struct dlb2_port *qm_port,
 						ev[3].event_type,
 					     DLB2_QE_EV_TYPE_WORD + 4);
 
-		/* Store the metadata to memory (use the double-precision
-		 * _mm_storeh_pd because there is no integer function for
-		 * storing the upper 64b):
-		 * qe[0] metadata = sse_qe[0][63:0]
-		 * qe[1] metadata = sse_qe[0][127:64]
-		 * qe[2] metadata = sse_qe[1][63:0]
-		 * qe[3] metadata = sse_qe[1][127:64]
-		 */
-		_mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
-		_mm_storeh_pd((double *)&qe[1].u.opaque_data,
-			      (__m128d)sse_qe[0]);
-		_mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
-		_mm_storeh_pd((double *)&qe[3].u.opaque_data,
-			      (__m128d)sse_qe[1]);
-
-		qe[0].data = ev[0].u64;
-		qe[1].data = ev[1].u64;
-		qe[2].data = ev[2].u64;
-		qe[3].data = ev[3].u64;
+ #ifdef __AVX512VL__
+
+			/*
+			 * 1) Build avx512 QE store and build each
+			 *    QE individually as XMM register
+			 * 2) Merge the 4 XMM registers/QEs into single AVX512
+			 *    register
+			 * 3) Store single avx512 register to &qe[0] (4x QEs
+			 *    stored in 1x store)
+			 */
+
+			__m128i v_qe0 = _mm_setzero_si128();
+			uint64_t meta = _mm_extract_epi64(sse_qe[0], 0);
+			v_qe0 = _mm_insert_epi64(v_qe0, ev[0].u64, 0);
+			v_qe0 = _mm_insert_epi64(v_qe0, meta, 1);
+
+			__m128i v_qe1 = _mm_setzero_si128();
+			meta = _mm_extract_epi64(sse_qe[0], 1);
+			v_qe1 = _mm_insert_epi64(v_qe1, ev[1].u64, 0);
+			v_qe1 = _mm_insert_epi64(v_qe1, meta, 1);
+
+			__m128i v_qe2 = _mm_setzero_si128();
+			meta = _mm_extract_epi64(sse_qe[1], 0);
+			v_qe2 = _mm_insert_epi64(v_qe2, ev[2].u64, 0);
+			v_qe2 = _mm_insert_epi64(v_qe2, meta, 1);
+
+			__m128i v_qe3 = _mm_setzero_si128();
+			meta = _mm_extract_epi64(sse_qe[1], 1);
+			v_qe3 = _mm_insert_epi64(v_qe3, ev[3].u64, 0);
+			v_qe3 = _mm_insert_epi64(v_qe3, meta, 1);
+
+			/* we have 4x XMM registers, one per QE. */
+			__m512i v_all_qes = _mm512_setzero_si512();
+			v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe0, 0);
+			v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe1, 1);
+			v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe2, 2);
+			v_all_qes = _mm512_inserti32x4(v_all_qes, v_qe3, 3);
+
+			/*
+			 * store the 4x QEs in a single register to the scratch
+			 * space of the PMD
+			 */
+			_mm512_store_si512(&qe[0], v_all_qes);
+#else
+			/*
+			 * Store the metadata to memory (use the double-precision
+			 * _mm_storeh_pd because there is no integer function for
+			 * storing the upper 64b):
+			 * qe[0] metadata = sse_qe[0][63:0]
+			 * qe[1] metadata = sse_qe[0][127:64]
+			 * qe[2] metadata = sse_qe[1][63:0]
+			 * qe[3] metadata = sse_qe[1][127:64]
+			 */
+			_mm_storel_epi64((__m128i *)&qe[0].u.opaque_data,
+					 sse_qe[0]);
+			_mm_storeh_pd((double *)&qe[1].u.opaque_data,
+				      (__m128d)sse_qe[0]);
+			_mm_storel_epi64((__m128i *)&qe[2].u.opaque_data,
+					 sse_qe[1]);
+			_mm_storeh_pd((double *)&qe[3].u.opaque_data,
+				      (__m128d)sse_qe[1]);
+
+			qe[0].data = ev[0].u64;
+			qe[1].data = ev[1].u64;
+			qe[2].data = ev[2].u64;
+			qe[3].data = ev[3].u64;
+#endif
 
 		break;
 	case 3: