[v1,2/8] examples/vhost_blk: replace smp with thread fence
Checks
Commit Message
Simply replace the rte_smp_mb barriers with SEQ_CST atomic thread fence,
if there are no load/store operations.
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
examples/vhost_blk/vhost_blk.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
Comments
On 12/21/20 4:50 PM, Joyce Kong wrote:
> Simply replace the rte_smp_mb barriers with SEQ_CST atomic thread fence,
> if there is no load/store operations.
>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
> examples/vhost_blk/vhost_blk.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
> index bb293d492..7ea60863d 100644
> --- a/examples/vhost_blk/vhost_blk.c
> +++ b/examples/vhost_blk/vhost_blk.c
> @@ -86,9 +86,9 @@ enqueue_task(struct vhost_blk_task *task)
> */
> used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
> used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
From here
> - rte_smp_mb();
> + rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
> used->idx++;
> - rte_smp_mb();
> + rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
to here — couldn't this sequence be replaced with a single release-ordered
atomic increment instead?
__atomic_add_fetch(&used->idx, 1, __ATOMIC_RELEASE);
> rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
> vq->id, used->idx, task->req_idx);
> @@ -112,12 +112,12 @@ enqueue_task_packed(struct vhost_blk_task *task)
> desc->id = task->buffer_id;
> desc->addr = 0;
>
> - rte_smp_mb();
> + rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
> if (vq->used_wrap_counter)
> desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
> else
> desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
> - rte_smp_mb();
> + rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
>
> rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
> task->inflight_idx);
>
@@ -86,9 +86,9 @@ enqueue_task(struct vhost_blk_task *task)
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
used->idx++;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -112,12 +112,12 @@ enqueue_task_packed(struct vhost_blk_task *task)
desc->id = task->buffer_id;
desc->addr = 0;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);