From patchwork Sun Sep 17 10:42:02 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Shachar Beiser
X-Patchwork-Id: 28804
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Shachar Beiser
To: dev@dpdk.org
Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro
Date: Sun, 17 Sep 2017 10:42:02 +0000
Message-Id: <5fa648b5ff9a246b0f5b4c32a53477f62531d325.1505643166.git.shacharbe@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros

Signed-off-by: Shachar Beiser
Acked-by: Yongseok Koh
Acked-by: Nelio Laranjeiro
---
I have rebased [PATCH v3 1/2] net/mlx5: replace network to host macros
and fixed a rebase conflict in mlx5_rxtx.h at line 609:

-rte_wmb();
+rte_io_wmb();
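
For readers less familiar with the DPDK byte-order API, here is a minimal
sketch of the substitution this patch applies throughout the PMD.
fill_vlan_tag() is a hypothetical helper written only for illustration; it
is not part of the patch:

    #include <stdint.h>
    #include <rte_byteorder.h>

    /*
     * rte_cpu_to_be_16/32/64() stand in for htons()/htonl()/htonll(),
     * and rte_be_to_cpu_16/32() for ntohs()/ntohl(). The endianness
     * test is resolved at compile time from RTE_BYTE_ORDER: they
     * compile to plain loads/stores on big-endian CPUs and to byte
     * swaps on little-endian ones, without depending on arpa/inet.h.
     */
    static inline void
    fill_vlan_tag(uint16_t *vlan_tag, int vlan_enabled, uint16_t vlan_id)
    {
            /* The device expects the tag in network (big-endian) order. */
            *vlan_tag = vlan_enabled ? rte_cpu_to_be_16(vlan_id) : 0;
    }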
---
 drivers/net/mlx5/mlx5_mac.c          |   8 +-
 drivers/net/mlx5/mlx5_mr.c           |   2 +-
 drivers/net/mlx5/mlx5_rxmode.c       |   8 +-
 drivers/net/mlx5/mlx5_rxq.c          |   9 ++-
 drivers/net/mlx5/mlx5_rxtx.c         | 137 ++++++++++++++++++++---------------
 drivers/net/mlx5/mlx5_rxtx.h         |  10 +--
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c |  12 +--
 7 files changed, 107 insertions(+), 79 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 45d23e4..b3c3fa2 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -263,11 +263,15 @@
 				(*mac)[0], (*mac)[1], (*mac)[2],
 				(*mac)[3], (*mac)[4], (*mac)[5]
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id)
+				     : 0),
 		},
 		.mask = {
 			.dst_mac = "\xff\xff\xff\xff\xff\xff",
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};
 	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 9593830..9a9f73a 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -203,7 +203,7 @@ struct ibv_mr *
 	txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
 	txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
 	txq_ctrl->txq.mp2mr[idx].mr = mr;
-	txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
+	txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey);
 	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
 	      (void *)txq_ctrl, mp->name, (void *)mp,
 	      txq_ctrl->txq.mp2mr[idx].lkey);
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 4a51e47..db2e05b 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -159,14 +159,18 @@
 				mac[0], mac[1], mac[2],
 				mac[3], mac[4], mac[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id) :
+				     0),
 		},
 		.mask = {
 			.dst_mac = {
 				mask[0], mask[1], mask[2],
 				mask[3], mask[4], mask[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 35c5cb4..437dc02 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -672,9 +672,10 @@
 		/* scat->addr must be able to store a pointer. */
 		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
 		*scat = (struct mlx5_wqe_data_seg){
-			.addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
-			.byte_count = htonl(DATA_LEN(buf)),
-			.lkey = htonl(rxq_ctrl->mr->lkey),
+			.addr =
+			    rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)),
+			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
+			.lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey),
 		};
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
@@ -1077,7 +1078,7 @@
 	/* Update doorbell counter. */
 	rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
 	rte_wmb();
-	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
+	*rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci);
 	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
 	assert(ret == 0);
 	return 0;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index bc1f85c..3f1e2f4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -306,7 +306,7 @@
 		op_own = cqe->op_own;
 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-			n = ntohl(cqe->byte_cnt);
+			n = rte_be_to_cpu_32(cqe->byte_cnt);
 		else
 			n = 1;
 		cq_ci += n;

@@ -436,7 +436,8 @@
 	raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 	/* Replace the Ethernet type by the VLAN if necessary. */
 	if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-		uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+		uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+						 buf->vlan_tci);
 		unsigned int len = 2 * ETHER_ADDR_LEN - 2;

 		addr += 2;
@@ -511,8 +512,10 @@
 			} else {
 				/* NOP WQE. */
 				wqe->ctrl = (rte_v128u32_t){
-					htonl(txq->wqe_ci << 8),
-					htonl(txq->qp_num_8s | 1),
+					rte_cpu_to_be_32(
+						txq->wqe_ci << 8),
+					rte_cpu_to_be_32(
+						txq->qp_num_8s | 1),
 					0,
 					0,
 				};
@@ -551,7 +554,14 @@
 			break;
 		max_wqe -= n;
 		if (tso) {
-			inl = htonl(copy_b | MLX5_INLINE_SEG);
+			uint32_t inl =
+				rte_cpu_to_be_32(copy_b |
+						 MLX5_INLINE_SEG);
+
+			pkt_inline_sz =
+				MLX5_WQE_DS(tso_header_sz) *
+				MLX5_WQE_DWORD_SIZE;
+
 			rte_memcpy((void *)raw, (void *)&inl,
 				   sizeof(inl));
 			raw += sizeof(inl);
@@ -600,9 +610,9 @@
 			ds = 3;
 use_dseg:
 			/* Add the remaining packet as a simple ds. */
-			naddr = htonll(addr);
+			naddr = rte_cpu_to_be_64(addr);
 			*dseg = (rte_v128u32_t){
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -639,9 +649,9 @@
 		total_length += length;
 #endif
 		/* Store segment information. */
-		naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 		*dseg = (rte_v128u32_t){
-			htonl(length),
+			rte_cpu_to_be_32(length),
 			mlx5_tx_mb2mr(txq, buf),
 			naddr,
 			naddr >> 32,
@@ -664,21 +674,23 @@
 	/* Initialize known and common part of the WQE structure. */
 	if (tso) {
 		wqe->ctrl = (rte_v128u32_t){
-			htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
-			htonl(txq->qp_num_8s | ds),
+			rte_cpu_to_be_32((txq->wqe_ci << 8) |
+					 MLX5_OPCODE_TSO),
+			rte_cpu_to_be_32(txq->qp_num_8s | ds),
 			0,
 			0,
 		};
 		wqe->eseg = (rte_v128u32_t){
 			0,
-			cs_flags | (htons(tso_segsz) << 16),
+			cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
 			0,
-			(ehdr << 16) | htons(tso_header_sz),
+			(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
 		};
 	} else {
 		wqe->ctrl = (rte_v128u32_t){
-			htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-			htonl(txq->qp_num_8s | ds),
+			rte_cpu_to_be_32((txq->wqe_ci << 8) |
+					 MLX5_OPCODE_SEND),
+			rte_cpu_to_be_32(txq->qp_num_8s | ds),
 			0,
 			0,
 		};
@@ -686,7 +698,7 @@
 			0,
 			cs_flags,
 			0,
-			(ehdr << 16) | htons(pkt_inline_sz),
+			(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
 		};
 	}
 next_wqe:
@@ -706,7 +718,7 @@
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		/* Request completion on last WQE. */
-		last_wqe->ctrl2 = htonl(8);
+		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
@@ -745,13 +757,14 @@
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
 	mpw->wqe->eseg.rsvd1 = 0;
 	mpw->wqe->eseg.rsvd2 = 0;
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -780,7 +793,7 @@
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	if (num < 3)
 		++txq->wqe_ci;
@@ -889,9 +902,9 @@
 		dseg = mpw.data.dseg[mpw.pkts_n];
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		*dseg = (struct mlx5_wqe_data_seg){
-			.byte_count = htonl(DATA_LEN(buf)),
+			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
 			.lkey = mlx5_tx_mb2mr(txq, buf),
-			.addr = htonll(addr),
+			.addr = rte_cpu_to_be_64(addr),
 		};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 		length += DATA_LEN(buf);
@@ -919,7 +932,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -959,12 +972,12 @@
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.cs_flags = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
@@ -995,9 +1008,10 @@
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(size));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
-	inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
@@ -1137,9 +1151,10 @@
 			dseg = mpw.data.dseg[mpw.pkts_n];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			*dseg = (struct mlx5_wqe_data_seg){
-				.byte_count = htonl(DATA_LEN(buf)),
+				.byte_count =
+					rte_cpu_to_be_32(DATA_LEN(buf)),
 				.lkey = mlx5_tx_mb2mr(txq, buf),
-				.addr = htonll(addr),
+				.addr = rte_cpu_to_be_64(addr),
 			};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 			length += DATA_LEN(buf);
@@ -1211,7 +1226,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1251,9 +1266,10 @@
 	mpw->pkts_n = 0;
 	mpw->total_len = sizeof(struct mlx5_wqe);
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_ENHANCED_MPSW);
+	mpw->wqe->ctrl[0] =
+		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+				 (txq->wqe_ci << 8) |
+				 MLX5_OPCODE_ENHANCED_MPSW);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1261,9 +1277,9 @@
 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);

 		/* Pad the first 2 DWORDs with zero-length inline header. */
-		*(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-			htonl(MLX5_INLINE_SEG);
+			rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
 		/* Start from the next WQEBB. */
 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1291,7 +1307,8 @@
 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(mpw->total_len));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 	txq->wqe_ci += ret;
@@ -1446,9 +1463,10 @@
 			dseg = mpw.data.dseg[mpw.pkts_n];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			*dseg = (struct mlx5_wqe_data_seg){
-				.byte_count = htonl(DATA_LEN(buf)),
+				.byte_count = rte_cpu_to_be_32(
+						DATA_LEN(buf)),
 				.lkey = mlx5_tx_mb2mr(txq, buf),
-				.addr = htonll(addr),
+				.addr = rte_cpu_to_be_64(addr),
 			};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 			length += DATA_LEN(buf);
@@ -1471,7 +1489,7 @@

 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
 			assert(length == DATA_LEN(buf));
-			inl_hdr = htonl(length | MLX5_INLINE_SEG);
+			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			mpw.data.raw = (volatile void *)
 				((uintptr_t)mpw.data.raw + inl_pad);
@@ -1527,9 +1545,9 @@
 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
 				rte_prefetch2((void *)(addr +
 						       n * RTE_CACHE_LINE_SIZE));
-			naddr = htonll(addr);
+			naddr = rte_cpu_to_be_64(addr);
 			*dseg = (rte_v128u32_t) {
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -1557,7 +1575,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1641,8 +1659,8 @@
 			(volatile struct mlx5_mini_cqe8 (*)[8])
 			(uintptr_t)(&(*rxq->cqes)[zip->ca &
 						  cqe_cnt].pkt_info);
-		len = ntohl((*mc)[zip->ai & 7].byte_cnt);
-		*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
 		if ((++zip->ai & 7) == 0) {
 			/* Invalidate consumed CQEs */
 			idx = zip->ca;
@@ -1690,7 +1708,7 @@
 					  cqe_cnt].pkt_info);

 		/* Fix endianness. */
-		zip->cqe_cnt = ntohl(cqe->byte_cnt);
+		zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
 		/*
 		 * Current mini array position is the one returned by
 		 * check_cqe64().
@@ -1705,8 +1723,8 @@
 		--rxq->cq_ci;
 		zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
 		/* Get packet size to return. */
-		len = ntohl((*mc)[0].byte_cnt);
-		*rss_hash = ntohl((*mc)[0].rx_hash_result);
+		len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+		*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
 		zip->ai = 1;
 		/* Prefetch all the entries to be invalidated */
 		idx = zip->ca;
@@ -1716,8 +1734,8 @@
 			++idx;
 		}
 	} else {
-		len = ntohl(cqe->byte_cnt);
-		*rss_hash = ntohl(cqe->rx_hash_res);
+		len = rte_be_to_cpu_32(cqe->byte_cnt);
+		*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
 	}
 	/* Error while receiving packet. */
 	if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1741,7 +1759,7 @@
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
-	uint16_t flags = ntohs(cqe->hdr_type_etc);
+	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

 	ol_flags =
 		TRANSPOSE(flags,
@@ -1848,7 +1866,7 @@
 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
 				pkt->ol_flags |= PKT_RX_FDIR;
 				if (cqe->sop_drop_qpn !=
-				    htonl(MLX5_FLOW_MARK_DEFAULT)) {
+				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
 					uint32_t mark = cqe->sop_drop_qpn;

 					pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1860,10 +1878,11 @@
 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
 			if (rxq->vlan_strip &&
 			    (cqe->hdr_type_etc &
-			     htons(MLX5_CQE_VLAN_STRIPPED))) {
+			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
 					PKT_RX_VLAN_STRIPPED;
-				pkt->vlan_tci = ntohs(cqe->vlan_info);
+				pkt->vlan_tci =
+					rte_be_to_cpu_16(cqe->vlan_info);
 			}
 			if (rxq->crc_present)
 				len -= ETHER_CRC_LEN;
@@ -1879,7 +1898,7 @@
 		 * of the buffers are already known, only the buffer address
 		 * changes.
 		 */
-		wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
 		if (len > DATA_LEN(seg)) {
 			len -= DATA_LEN(seg);
 			++NB_SEGS(pkt);
@@ -1907,9 +1926,9 @@
 	/* Update the consumer index. */
 	rxq->rq_ci = rq_ci >> sges_n;
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	/* Increment packets counter. */
 	rxq->stats.ipackets += i;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 107ada0..9375aa8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -269,7 +269,7 @@ struct txq {
 		uintptr_t start; /* Start address of MR */
 		uintptr_t end; /* End address of MR */
 		struct ibv_mr *mr; /* Memory Region (for mp). */
-		uint32_t lkey; /* htonl(mr->lkey) */
+		uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */
 	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
 	uint16_t mr_cache_idx; /* Index of last hit entry. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
@@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	}
 #endif /* NDEBUG */
 	++cq_ci;
-	txq->wqe_pi = ntohs(cqe->wqe_counter);
+	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
 	ctrl = (volatile struct mlx5_wqe_ctrl *)
 		tx_mlx5_wqe(txq, txq->wqe_pi);
 	elts_tail = ctrl->ctrl3;
@@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	txq->elts_tail = elts_tail;
 	/* Update the consumer index. */
 	rte_wmb();
-	*txq->cq_db = htonl(cq_ci);
+	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
 }

 /**
@@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 		if (txq->mp2mr[i].start <= addr &&
 		    txq->mp2mr[i].end >= addr) {
 			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
-			assert(htonl(txq->mp2mr[i].mr->lkey) ==
+			assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) ==
 			       txq->mp2mr[i].lkey);
 			txq->mr_cache_idx = i;
 			return txq->mp2mr[i].lkey;
@@ -606,7 +606,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);

 	rte_io_wmb();
-	*txq->qp_db = htonl(txq->wqe_ci);
+	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
 	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
 	*dst = *src;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 39c7325..aff3359 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -293,7 +293,7 @@
 	/* Fill ESEG in the header. */
 	_mm_store_si128(t_wqe + 1,
 			_mm_set_epi16(0, 0, 0, 0,
-				      htons(len), cs_flags,
+				      rte_cpu_to_be_16(len), cs_flags,
 				      0, 0));
 	txq->wqe_ci = wqe_ci;
 }
@@ -302,7 +302,7 @@
 	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
 	txq->elts_head = elts_head;
 	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
 		++txq->cq_pi;
@@ -564,11 +564,11 @@
 		return;
 	}
 	for (i = 0; i < n; ++i)
-		wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
-				    RTE_PKTMBUF_HEADROOM);
+		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+					      RTE_PKTMBUF_HEADROOM);
 	rxq->rq_ci += n;
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }

 /**
@@ -1251,7 +1251,7 @@
 		}
 	}
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	return rcvd_pkt;
 }
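
For context on the rte_wmb()/rte_io_wmb() distinction behind the rebase
conflict noted above, a condensed sketch of the doorbell sequence from the
mlx5_rxtx.h hunk. The helper name is hypothetical, and treating bf_reg as
the BlueFlame register pointer is an assumption of this sketch, not part of
the patch:

    static inline void
    tx_ring_doorbell(struct txq *txq, volatile struct mlx5_wqe *wqe)
    {
            volatile uint64_t *dst = (volatile uint64_t *)txq->bf_reg;
            volatile uint64_t *src = (volatile uint64_t *)wqe;

            /* Make the WQE visible before the doorbell record update. */
            rte_io_wmb();
            /* The doorbell record is read by the device: big-endian. */
            *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
            /* Ensure ordering between DB record and BF copy. */
            rte_wmb();
            /* BlueFlame: copy the first 8 bytes of the WQE to MMIO. */
            *dst = *src;
    }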