[v5,31/45] baseband/acc: use rte stdatomic API
Checks
Commit Message
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
Comments
Hi Tyler,
Still some issues with indentation when the sed is causing split across 2 lines.
Please fix the indentation; NACK as it stands.
Thanks
Nic
> -----Original Message-----
> From: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Sent: Monday, May 6, 2024 10:58 AM
> To: dev@dpdk.org
> Cc: Mattias Rönnblom <mattias.ronnblom@ericsson.com>; Morten Brørup
> <mb@smartsharesystems.com>; Sevincer, Abdullah
> <abdullah.sevincer@intel.com>; Ajit Khaparde
> <ajit.khaparde@broadcom.com>; Alok Prasad <palok@marvell.com>;
> Burakov, Anatoly <anatoly.burakov@intel.com>; Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru>; Anoob Joseph <anoobj@marvell.com>;
> Richardson, Bruce <bruce.richardson@intel.com>; Marohn, Byron
> <byron.marohn@intel.com>; Chenbo Xia <chenbox@nvidia.com>;
> Chengwen Feng <fengchengwen@huawei.com>; Loftus, Ciara
> <ciara.loftus@intel.com>; Power, Ciara <ciara.power@intel.com>; Dariusz
> Sosnowski <dsosnowski@nvidia.com>; Hunt, David <david.hunt@intel.com>;
> Devendra Singh Rawat <dsinghrawat@marvell.com>; Carrillo, Erik G
> <erik.g.carrillo@intel.com>; Guoyang Zhou <zhouguoyang@huawei.com>;
> Harman Kalra <hkalra@marvell.com>; Van Haaren, Harry
> <harry.van.haaren@intel.com>; Nagarahalli, Honnappa
> <Honnappa.Nagarahalli@arm.com>; Jakub Grajciar <jgrajcia@cisco.com>;
> Jerin Jacob <jerinj@marvell.com>; Jeroen de Borst <jeroendb@google.com>;
> Jian Wang <jianwang@trustnetic.com>; Jiawen Wu
> <jiawenwu@trustnetic.com>; Jie Hai <haijie1@huawei.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Joshua Washington <joshwash@google.com>;
> Joyce Kong <joyce.kong@arm.com>; Guo, Junfeng <junfeng.guo@intel.com>;
> Laatz, Kevin <kevin.laatz@intel.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Liang Ma <liangma@liangbit.com>;
> Long Li <longli@microsoft.com>; Maciej Czekaj <mczekaj@marvell.com>;
> Matan Azrad <matan@nvidia.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Chautru, Nicolas
> <nicolas.chautru@intel.com>; Ori Kam <orika@nvidia.com>; Pavan Nikhilesh
> <pbhagavatula@marvell.com>; Mccarthy, Peter
> <peter.mccarthy@intel.com>; Rahul Lakkireddy
> <rahul.lakkireddy@chelsio.com>; Pattan, Reshma
> <reshma.pattan@intel.com>; Xu, Rosen <rosen.xu@intel.com>; Ruifeng
> Wang <ruifeng.wang@arm.com>; Rushil Gupta <rushilg@google.com>;
> Gobriel, Sameh <sameh.gobriel@intel.com>; Sivaprasad Tummala
> <sivaprasad.tummala@amd.com>; Somnath Kotur
> <somnath.kotur@broadcom.com>; Stephen Hemminger
> <stephen@networkplumber.org>; Suanming Mou
> <suanmingm@nvidia.com>; Sunil Kumar Kori <skori@marvell.com>; Sunil
> Uttarwar <sunilprakashrao.uttarwar@amd.com>; Tetsuya Mukawa
> <mtetsuyah@gmail.com>; Vamsi Attunuru <vattunuru@marvell.com>;
> Viacheslav Ovsiienko <viacheslavo@nvidia.com>; Medvedkin, Vladimir
> <vladimir.medvedkin@intel.com>; Xiaoyun Wang
> <cloud.wangxiaoyun@huawei.com>; Wang, Yipeng1
> <yipeng1.wang@intel.com>; Yisen Zhuang <yisen.zhuang@huawei.com>;
> Ziyang Xuan <xuanziyang2@huawei.com>; Tyler Retzlaff
> <roretzla@linux.microsoft.com>
> Subject: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
>
> Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding
> rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
> drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++-----
> -------
> 2 files changed, 48 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/baseband/acc/rte_acc100_pmd.c
> b/drivers/baseband/acc/rte_acc100_pmd.c
> index 4f666e5..ee50b9c 100644
> --- a/drivers/baseband/acc/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> @@ -3673,8 +3673,8 @@
>
> desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> desc = q->ring_addr + desc_idx;
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3728,8 +3728,8 @@
> uint16_t current_dequeued_descs = 0, descs_in_tb;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3742,8 +3742,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3755,8 +3755,8 @@
>
> while (i < descs_in_tb) {
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs
> %d\n",
> desc, rsp.val, descs_in_tb, desc-
> >req.numCBs); @@ -3793,8 +3793,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3846,8 +3846,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3902,8 +3902,8 @@
> uint8_t cbs_in_tb = 1, cb_idx = 0;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3919,8 +3919,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3930,8 +3930,8 @@
> /* Read remaining CBs if exists */
> while (cb_idx < cbs_in_tb) {
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
> desc, rsp.val, cb_idx,
> cbs_in_tb); diff --git a/drivers/baseband/acc/rte_vrb_pmd.c
> b/drivers/baseband/acc/rte_vrb_pmd.c
> index 88b1104..f7c54be 100644
> --- a/drivers/baseband/acc/rte_vrb_pmd.c
> +++ b/drivers/baseband/acc/rte_vrb_pmd.c
> @@ -3119,7 +3119,8 @@
>
> desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> desc = q->ring_addr + desc_idx;
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
> return -1;
> @@ -3157,7 +3158,8 @@
> struct rte_bbdev_enc_op *op;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3192,7 +3194,8 @@
> uint16_t current_dequeued_descs = 0, descs_in_tb;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> if (*dequeued_ops + 1 > max_requested_ops)
> return -1;
> @@ -3208,7 +3211,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3220,7 +3224,8 @@
>
> while (i < descs_in_tb) {
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
>
> vrb_update_dequeued_operation(desc, rsp, &op->status,
> aq_dequeued, true, false); @@ -3246,7 +3251,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3290,7 +3296,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3346,7 +3353,8 @@
> uint32_t tb_crc_check = 0;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3362,7 +3370,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus the whole TB) -
> checking sdone bit.
> * If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3372,7 +3381,8 @@
> /* Read remaining CBs if exists. */
> while (cb_idx < cbs_in_tb) {
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
> rsp.val, desc->rsp.add_info_0,
> @@ -3790,7 +3800,8 @@
> struct rte_bbdev_fft_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4116,7 +4127,8 @@
> uint8_t descs_in_op, i;
>
> desc = acc_desc_tail(q, dequeued_ops);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4127,7 +4139,8 @@
> /* Get last CB. */
> last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op -
> 1);
> /* Check if last op is ready to dequeue by checking fdone bit.
> If not exit. */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> *)last_desc, __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_FDONE))
> return -1;
> #ifdef RTE_LIBRTE_BBDEV_DEBUG
> @@ -4137,8 +4150,8 @@
> for (i = 1; i < descs_in_op - 1; i++) {
> last_desc = q->ring_addr + ((q->sw_ring_tail +
> dequeued_ops + i)
> & q->sw_ring_wrap_mask);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit(
> + (uint64_t __rte_atomic *)last_desc,
> rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_FDONE))
> return -1;
> }
> @@ -4154,7 +4167,8 @@
>
> for (i = 0; i < descs_in_op; i++) {
> desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
> & q->sw_ring_wrap_mask);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
>
> vrb_update_dequeued_operation(desc, rsp, &op->status,
> aq_dequeued, true, false);
> --
> 1.8.3.1
Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
Med venlig hilsen / Kind regards,
-Morten Brørup
> From: Chautru, Nicolas [mailto:nicolas.chautru@intel.com]
> Sent: Monday, 13 May 2024 20.13
>
> Hi Tyler,
>
> Still some issues with indentation when the sed is causing split across
> 2 lines.
> Please fix indentation, nack like that.
>
> Thanks
> Nic
>
> > From: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > Sent: Monday, May 6, 2024 10:58 AM
> >
> > Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding
> > rte_atomic_xxx optional rte stdatomic API.
> >
> > Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> > ---
> > drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++-------------
> -
> > drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++---
> --
> > -------
> > 2 files changed, 48 insertions(+), 34 deletions(-)
> >
> > diff --git a/drivers/baseband/acc/rte_acc100_pmd.c
> > b/drivers/baseband/acc/rte_acc100_pmd.c
> > index 4f666e5..ee50b9c 100644
> > --- a/drivers/baseband/acc/rte_acc100_pmd.c
> > +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> > @@ -3673,8 +3673,8 @@
> >
> > desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> > desc = q->ring_addr + desc_idx;
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3728,8 +3728,8 @@
> > uint16_t current_dequeued_descs = 0, descs_in_tb;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3742,8 +3742,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3755,8 +3755,8 @@
> >
> > while (i < descs_in_tb) {
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs
> > %d\n",
> > desc, rsp.val, descs_in_tb, desc-
> > >req.numCBs); @@ -3793,8 +3793,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3846,8 +3846,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3902,8 +3902,8 @@
> > uint8_t cbs_in_tb = 1, cb_idx = 0;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3919,8 +3919,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3930,8 +3930,8 @@
> > /* Read remaining CBs if exists */
> > while (cb_idx < cbs_in_tb) {
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
> > desc, rsp.val, cb_idx,
> > cbs_in_tb); diff --git a/drivers/baseband/acc/rte_vrb_pmd.c
> > b/drivers/baseband/acc/rte_vrb_pmd.c
> > index 88b1104..f7c54be 100644
> > --- a/drivers/baseband/acc/rte_vrb_pmd.c
> > +++ b/drivers/baseband/acc/rte_vrb_pmd.c
> > @@ -3119,7 +3119,8 @@
> >
> > desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> > desc = q->ring_addr + desc_idx;
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
> > return -1;
> > @@ -3157,7 +3158,8 @@
> > struct rte_bbdev_enc_op *op;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3192,7 +3194,8 @@
> > uint16_t current_dequeued_descs = 0, descs_in_tb;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > if (*dequeued_ops + 1 > max_requested_ops)
> > return -1;
> > @@ -3208,7 +3211,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3220,7 +3224,8 @@
> >
> > while (i < descs_in_tb) {
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> >
> > vrb_update_dequeued_operation(desc, rsp, &op->status,
> > aq_dequeued, true, false); @@ -3246,7 +3251,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3290,7 +3296,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3346,7 +3353,8 @@
> > uint32_t tb_crc_check = 0;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3362,7 +3370,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus the whole
> TB) -
> > checking sdone bit.
> > * If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3372,7 +3381,8 @@
> > /* Read remaining CBs if exists. */
> > while (cb_idx < cbs_in_tb) {
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
> > rsp.val, desc->rsp.add_info_0,
> > @@ -3790,7 +3800,8 @@
> > struct rte_bbdev_fft_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -4116,7 +4127,8 @@
> > uint8_t descs_in_op, i;
> >
> > desc = acc_desc_tail(q, dequeued_ops);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -4127,7 +4139,8 @@
> > /* Get last CB. */
> > last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op -
> > 1);
> > /* Check if last op is ready to dequeue by checking fdone
> bit.
> > If not exit. */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> > *)last_desc, __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > return -1;
> > #ifdef RTE_LIBRTE_BBDEV_DEBUG
> > @@ -4137,8 +4150,8 @@
> > for (i = 1; i < descs_in_op - 1; i++) {
> > last_desc = q->ring_addr + ((q->sw_ring_tail +
> > dequeued_ops + i)
> > & q->sw_ring_wrap_mask);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> > *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit(
> > + (uint64_t __rte_atomic *)last_desc,
> > rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > return -1;
> > }
> > @@ -4154,7 +4167,8 @@
> >
> > for (i = 0; i < descs_in_op; i++) {
> > desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
> > & q->sw_ring_wrap_mask);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> >
> > vrb_update_dequeued_operation(desc, rsp, &op->status,
> > aq_dequeued, true, false);
> > --
> > 1.8.3.1
On Mon, May 13, 2024 at 9:05 PM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
In Tyler's defence, indentation is a mess in a lot of DPDK code
(for legacy reasons, superficial reviews, not enough coffee, etc.).
mlx5 code is also a big pain as it seems to have its own coding style.
I was in the process of taking this series yesterday and I was
looking at those indents before getting EINTR.
Tyler, can you please fix those? This will save me some time and I can
get this merged soon.
Thanks.
On Tue, May 14, 2024 at 09:31:18AM +0200, David Marchand wrote:
> On Mon, May 13, 2024 at 9:05 PM Morten Brørup <mb@smartsharesystems.com> wrote:
> >
> > Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
>
> In Tyler's defence, indentation is a mess in a lot of DPDK code
> (for legacy reasons, superficial reviews, not enough coffee, etc.).
> mlx5 code is also a big pain as it seems to have its own coding style.
>
> I was in the process of taking this series yesterday and I was
> looking at those indents before getting EINTR.
> Tyler, can you please fix those? This will save me some time and I can
> get this merged soon.
sure David i'll tweak this patch.
>
>
> Thanks.
>
> --
> David Marchand
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);