From patchwork Tue Mar 3 08:27:04 2015
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: xuelin.shi@freescale.com
X-Patchwork-Id: 3820
Date: Tue, 3 Mar 2015 16:27:04 +0800
Message-ID: <1425371224-15631-1-git-send-email-xuelin.shi@freescale.com>
X-Mailer: git-send-email 1.8.4
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH] ixgbe: fix data access on big endian cpu
List-Id: patches and discussions about DPDK

From: Xuelin Shi <xuelin.shi@freescale.com>

Enforce the rules for data exchanged between the CPU and the ixgbe hardware:
1. when the CPU uses data owned by ixgbe, it must apply rte_le_to_cpu_xx(...);
2. when the CPU fills data for ixgbe, it must apply rte_cpu_to_le_xx(...).

Signed-off-by: Xuelin Shi <xuelin.shi@freescale.com>
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 115 ++++++++++++++++++++++++--------------
 1 file changed, 72 insertions(+), 43 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e6766b3..fb01a4a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -140,7 +140,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
 	int i;
 
 	/* check DD bit on threshold descriptor */
-	status = txq->tx_ring[txq->tx_next_dd].wb.status;
+	status = rte_le_to_cpu_32(txq->tx_ring[txq->tx_next_dd].wb.status);
 	if (! (status & IXGBE_ADVTXD_STAT_DD))
 		return 0;
 
@@ -186,11 +186,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
-		txdp->read.buffer_addr = buf_dma_addr;
+		txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 		txdp->read.cmd_type_len =
-			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+			rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 		txdp->read.olinfo_status =
-			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+			rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 		rte_prefetch0(&(*pkts)->pool);
 	}
 }
@@ -206,11 +209,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
-	txdp->read.buffer_addr = buf_dma_addr;
+	txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
 	txdp->read.cmd_type_len =
-		((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+		rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
 	txdp->read.olinfo_status =
-		(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
 	rte_prefetch0(&(*pkts)->pool);
 }
 
@@ -297,7 +303,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * a divisor of the ring size
 		 */
 		tx_r[txq->tx_next_rs].read.cmd_type_len |=
-			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+				rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		txq->tx_tail = 0;
@@ -316,7 +322,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	 */
 	if (txq->tx_tail > txq->tx_next_rs) {
 		tx_r[txq->tx_next_rs].read.cmd_type_len |=
-			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+				rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
 		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
 						txq->tx_rs_thresh);
 		if (txq->tx_next_rs >= txq->nb_tx_desc)
@@ -517,6 +523,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
 	uint16_t nb_tx_desc = txq->nb_tx_desc;
 	uint16_t desc_to_clean_to;
 	uint16_t nb_tx_to_clean;
+	uint32_t stat;
 
 	/* Determine the last descriptor needing to be cleaned */
 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -525,7 +532,9 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
 
 	/* Check to make sure the last descriptor to clean is done */
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+
+	stat = rte_le_to_cpu_32(txr[desc_to_clean_to].wb.status);
+	if (! (stat & IXGBE_TXD_STAT_DD))
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
@@ -556,7 +565,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
 	 * up to the last descriptor with the RS bit set
 	 * are done. Only reset the threshold descriptor.
 	 */
-	txr[desc_to_clean_to].wb.status = 0;
+	txr[desc_to_clean_to].wb.status = rte_cpu_to_le_32(0);
 
 	/* Update the txq to reflect the last descriptor that was cleaned */
 	txq->last_desc_cleaned = desc_to_clean_to;
 
@@ -813,12 +822,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 */
 			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
 			txd->read.buffer_addr =
-				rte_cpu_to_le_64(buf_dma_addr);
+					rte_cpu_to_le_64(buf_dma_addr);
 			txd->read.cmd_type_len =
-				rte_cpu_to_le_32(cmd_type_len | slen);
+					rte_cpu_to_le_32(cmd_type_len | slen);
 			txd->read.olinfo_status =
-				rte_cpu_to_le_32(olinfo_status);
+					rte_cpu_to_le_32(olinfo_status);
+
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
@@ -958,14 +969,16 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 	uint64_t pkt_flags;
 	int s[LOOK_AHEAD], nb_dd;
 	int i, j, nb_rx = 0;
+	uint32_t stat;
 
 	/* get references to current descriptor and S/W ring entry */
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
+	stat = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
 	/* check to make sure there is at least 1 packet to receive */
-	if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+	if (! (stat & IXGBE_RXDADV_STAT_DD))
 		return 0;
 
 	/*
@@ -977,7 +990,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 	{
 		/* Read desc statuses backwards to avoid race condition */
 		for (j = LOOK_AHEAD-1; j >= 0; --j)
-			s[j] = rxdp[j].wb.upper.status_error;
+			s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
 
 		/* Compute how many status bits were set */
 		nb_dd = 0;
@@ -988,28 +1001,36 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
+			uint16_t tmp16;
+			uint32_t tmp32;
+
 			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+			tmp16 = rte_le_to_cpu_16(rxdp[j].wb.upper.length);
+			pkt_len = tmp16 - rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
-			mb->vlan_tci = rxdp[j].wb.upper.vlan;
+			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
 
 			/* convert descriptor fields to rte mbuf flags */
-			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
+			tmp32 = rte_le_to_cpu_32(
 					rxdp[j].wb.lower.lo_dword.data);
+			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(tmp32);
+
 			/* reuse status field from scan list */
 			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
 			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
 			mb->ol_flags = pkt_flags;
 
 			if (likely(pkt_flags & PKT_RX_RSS_HASH))
-				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+				mb->hash.rss = rte_le_to_cpu_32(
+					rxdp[j].wb.lower.hi_dword.rss);
 			else if (pkt_flags & PKT_RX_FDIR) {
-				mb->hash.fdir.hash =
-					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-					& IXGBE_ATR_HASH_MASK);
-				mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+				tmp16 = rxdp[j].wb.lower.hi_dword.csum_ip.csum;
+				mb->hash.fdir.hash = rte_le_to_cpu_16(
+						tmp16 & IXGBE_ATR_HASH_MASK);
+
+				tmp16 = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+				mb->hash.fdir.id = rte_le_to_cpu_16(tmp16);
 			}
 		}
 
@@ -1063,8 +1084,8 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 
 		/* populate the descriptors */
 		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
-		rxdp[i].read.hdr_addr = dma_addr;
-		rxdp[i].read.pkt_addr = dma_addr;
+		rxdp[i].read.hdr_addr = rte_cpu_to_le_64(dma_addr);
+		rxdp[i].read.pkt_addr = rte_cpu_to_le_64(dma_addr);
 	}
 
 	/* update tail pointer */
@@ -1221,8 +1242,8 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * using invalid descriptor fields when read from rxd.
 		 */
 		rxdp = &rx_ring[rx_id];
-		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+		if (! (staterr & IXGBE_RXDADV_STAT_DD))
 			break;
 
 		rxd = *rxdp;
@@ -1326,12 +1347,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+			rxm->hash.rss = rte_le_to_cpu_32(
+						rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-				& IXGBE_ATR_HASH_MASK);
-			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			uint16_t tmp16;
+
+			tmp16 = rxd.wb.lower.hi_dword.csum_ip.csum;
+			rxm->hash.fdir.hash = rte_le_to_cpu_16(
+						tmp16 & IXGBE_ATR_HASH_MASK);
+
+			rxm->hash.fdir.id = rte_le_to_cpu_16(
+						rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1413,8 +1439,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * using invalid descriptor fields when read from rxd.
 		 */
 		rxdp = &rx_ring[rx_id];
-		staterr = rxdp->wb.upper.status_error;
-		if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+		staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);
+		if (! (staterr & IXGBE_RXDADV_STAT_DD))
 			break;
 
 		rxd = *rxdp;
@@ -1570,13 +1596,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+			first_seg->hash.rss = rte_le_to_cpu_32(
+						rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
-			first_seg->hash.fdir.hash =
+			first_seg->hash.fdir.hash = rte_le_to_cpu_16(
 				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-				& IXGBE_ATR_HASH_MASK);
-			first_seg->hash.fdir.id =
-				rxd.wb.lower.hi_dword.csum_ip.ip_id;
+				& IXGBE_ATR_HASH_MASK));
+			first_seg->hash.fdir.id = rte_le_to_cpu_16(
+				rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1742,7 +1769,7 @@ ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
 	prev = (uint16_t) (txq->nb_tx_desc - 1);
 	for (i = 0; i < txq->nb_tx_desc; i++) {
 		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-		txd->wb.status = IXGBE_TXD_STAT_DD;
+		txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
 		txe[i].mbuf = NULL;
 		txe[i].last_id = i;
 		txe[prev].next_id = i;
@@ -2306,7 +2333,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
 	while ((desc < rxq->nb_rx_desc) &&
-		(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+		(rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+		IXGBE_RXDADV_STAT_DD)) {
 		desc += IXGBE_RXQ_SCAN_INTERVAL;
 		rxdp += IXGBE_RXQ_SCAN_INTERVAL;
 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2331,7 +2359,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 		desc -= rxq->nb_rx_desc;
 
 	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+	return !!(rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+			IXGBE_RXDADV_STAT_DD);
 }
 
 void
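
For reference, the two rules from the commit message can be illustrated with a minimal, self-contained C sketch built on DPDK's rte_byteorder.h helpers. The descriptor layout and function names below (example_desc, example_fill, example_dd_set) are hypothetical stand-ins for illustration only, not the actual ixgbe structures or driver code:

#include <stdint.h>

#include <rte_byteorder.h>

/*
 * Illustrative only: example_desc is a hypothetical little-endian
 * descriptor shared with a NIC, not the real ixgbe layout.
 */
struct example_desc {
	uint64_t buffer_addr;	/* filled by the CPU, read by the NIC */
	uint32_t cmd_type_len;	/* filled by the CPU, read by the NIC */
	uint32_t status;	/* written back by the NIC, read by the CPU */
};

/* Rule 2: the CPU fills data for the NIC -> convert host order to LE. */
static inline void
example_fill(volatile struct example_desc *d, uint64_t dma_addr, uint32_t cmd)
{
	d->buffer_addr = rte_cpu_to_le_64(dma_addr);
	d->cmd_type_len = rte_cpu_to_le_32(cmd);
}

/* Rule 1: the CPU uses data owned by the NIC -> convert LE to host order. */
static inline int
example_dd_set(const volatile struct example_desc *d, uint32_t dd_bit)
{
	uint32_t status = rte_le_to_cpu_32(d->status);

	return (status & dd_bit) != 0;
}

On little-endian CPUs the rte_cpu_to_le_xx()/rte_le_to_cpu_xx() helpers compile to no-ops, so the conversions cost nothing there and only change behaviour on big-endian targets.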