[dpdk-dev,v2,03/17] ixgbe: clean log messages
Commit Message
Clean log messages:
- remove leading \n in some messages,
- remove trailing \n in some messages,
- split multi-line messages,
- replace some PMD_INIT_LOG(DEBUG, "some_func") with PMD_INIT_FUNC_TRACE().
Signed-off-by: David Marchand <david.marchand@6wind.com>
---
lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 150 +++++++++++++++++------------------
lib/librte_pmd_ixgbe/ixgbe_fdir.c | 6 +-
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 93 +++++++++++-----------
3 files changed, 124 insertions(+), 125 deletions(-)
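For readers less familiar with these driver log macros, here is a minimal, self-contained sketch of the conventions the series converges on. It is not part of the patch; the macro bodies below are assumed stand-ins for the real PMD_INIT_LOG()/PMD_INIT_FUNC_TRACE() definitions in the driver's log header, shown only to make the before/after style concrete (##__VA_ARGS__ is a GNU extension, as used by DPDK itself):

#include <stdio.h>

/* Assumed stand-in definitions, not the DPDK ones. */
#define PMD_INIT_LOG(level, fmt, ...) \
        printf("PMD_INIT(" #level "): " fmt "\n", ##__VA_ARGS__)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s()", __func__)

static void example_dev_start(int port_id)
{
        /* Instead of PMD_INIT_LOG(DEBUG, "example_dev_start"): */
        PMD_INIT_FUNC_TRACE();

        /* No leading or trailing '\n' in the format string: the log macro
         * terminates the line itself, and one message per call replaces
         * the old multi-line messages. */
        PMD_INIT_LOG(ERR, "Unable to start port %d", port_id);
}

int main(void)
{
        example_dev_start(0);
        return 0;
}

Compiled standalone, this prints one line per call, e.g. "PMD_INIT(ERR): Unable to start port 0", which is the style every message touched by this patch is converted to.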
Comments
Looks good
On Mon, Sep 1, 2014 at 5:24 AM, David Marchand <david.marchand@6wind.com>
wrote:
> Clean log messages:
> - remove leading \n in some messages,
> - remove trailing \n in some messages,
> - split multi-line messages,
> - replace some PMD_INIT_LOG(DEBUG, "some_func") with PMD_INIT_FUNC_TRACE().
>
> Signed-off-by: David Marchand <david.marchand@6wind.com>
> ---
> lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 150 +++++++++++++++++------------------
> lib/librte_pmd_ixgbe/ixgbe_fdir.c | 6 +-
> lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 93 +++++++++++-----------
> 3 files changed, 124 insertions(+), 125 deletions(-)
>
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> index a8a7ed6..1419494 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> @@ -547,12 +547,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
> if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
> return -ENOSYS;
>
> - PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
> + PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
> (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
>
> n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
> if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
> - PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
> + PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
> return -EIO;
> }
> offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
> @@ -572,19 +572,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
> else
> stat_mappings->rqsmr[n] |= qsmr_mask;
>
> - PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
> - "%s[%d] = 0x%08x\n",
> - (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
> - is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
> + PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
> + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
> + queue_id, stat_idx);
> + PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
> + is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
>
> /* Now write the mapping in the appropriate register */
> if (is_rx) {
> - PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
> + PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
> stat_mappings->rqsmr[n], n);
> IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
> }
> else {
> - PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
> + PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
> stat_mappings->tqsm[n], n);
> IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
> }
> @@ -790,12 +791,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>
> if (diag == IXGBE_ERR_EEPROM_VERSION) {
> PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
> - "LOM. Please be aware there may be issues associated "
> - "with your hardware.\n If you are experiencing problems "
> + "LOM. Please be aware there may be issues associated "
> + "with your hardware.");
> + PMD_INIT_LOG(ERR, "If you are experiencing problems "
> "please contact your Intel or hardware representative "
> - "who provided you with this hardware.\n");
> + "who provided you with this hardware.");
> } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
> - PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
> + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
> if (diag) {
> PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
> return -EIO;
> @@ -811,10 +813,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
> hw->mac.num_rar_entries, 0);
> if (eth_dev->data->mac_addrs == NULL) {
> - PMD_INIT_LOG(ERR,
> - "Failed to allocate %u bytes needed to store "
> - "MAC addresses",
> - ETHER_ADDR_LEN * hw->mac.num_rar_entries);
> + PMD_INIT_LOG(ERR, "Failed to allocate %u bytes needed to store "
> + "MAC addresses",
> + ETHER_ADDR_LEN * hw->mac.num_rar_entries);
> return -ENOMEM;
> }
> /* Copy the permanent MAC address */
> @@ -825,9 +826,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
> IXGBE_VMDQ_NUM_UC_MAC, 0);
> if (eth_dev->data->hash_mac_addrs == NULL) {
> - PMD_INIT_LOG(ERR,
> - "Failed to allocate %d bytes needed to store MAC addresses",
> - ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
> + PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to store "
> + "MAC addresses",
> + ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
> return -ENOMEM;
> }
>
> @@ -849,12 +850,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> IXGBE_WRITE_FLUSH(hw);
>
> if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
> - PMD_INIT_LOG(DEBUG,
> - "MAC: %d, PHY: %d, SFP+: %d<n",
> + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
> (int) hw->mac.type, (int) hw->phy.type,
> (int) hw->phy.sfp_type);
> else
> - PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
> + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
> (int) hw->mac.type, (int) hw->phy.type);
>
> PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
> @@ -933,7 +933,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
>
> IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
> struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
>
> - PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
> + PMD_INIT_FUNC_TRACE();
>
> eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
> eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
> @@ -963,7 +963,8 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> /* Initialize the shared code (base driver) */
> diag = ixgbe_init_shared_code(hw);
> if (diag != IXGBE_SUCCESS) {
> - PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
> + PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d",
> + diag);
> return -EIO;
> }
>
> @@ -996,10 +997,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
> hw->mac.num_rar_entries, 0);
> if (eth_dev->data->mac_addrs == NULL) {
> - PMD_INIT_LOG(ERR,
> - "Failed to allocate %u bytes needed to store "
> - "MAC addresses",
> - ETHER_ADDR_LEN * hw->mac.num_rar_entries);
> + PMD_INIT_LOG(ERR, "Failed to allocate %u bytes needed to store "
> + "MAC addresses",
> + ETHER_ADDR_LEN * hw->mac.num_rar_entries);
> return -ENOMEM;
> }
>
> @@ -1033,13 +1033,14 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> break;
>
> default:
> - PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
> + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d",
> + diag);
> return (-EIO);
> }
>
> - PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
> - eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
> - "ixgbe_mac_82599_vf");
> + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
> + eth_dev->data->port_id, pci_dev->id.vendor_id,
> + pci_dev->id.device_id, "ixgbe_mac_82599_vf");
>
> return 0;
> }
> @@ -1416,8 +1417,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
>
> /* IXGBE devices don't support half duplex */
> if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
> - (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
> - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
> + (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
> + PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
> dev->data->dev_conf.link_duplex,
> dev->data->port_id);
> return -EINVAL;
> @@ -1443,7 +1444,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
> /* This can fail when allocating mbufs for descriptor rings */
> err = ixgbe_dev_rx_init(dev);
> if (err) {
> - PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
> + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
> goto error;
> }
>
> @@ -1490,9 +1491,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
> speed = IXGBE_LINK_SPEED_10GB_FULL;
> break;
> default:
> - PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
> - dev->data->dev_conf.link_speed,
> - dev->data->port_id);
> + PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
> + dev->data->dev_conf.link_speed,
> + dev->data->port_id);
> goto error;
> }
>
> @@ -1598,10 +1599,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
> #ifdef RTE_NIC_BYPASS
> if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
> /* Not suported in bypass mode */
> - PMD_INIT_LOG(ERR,
> - "\nSet link up is not supported "
> - "by device id 0x%x\n",
> - hw->device_id);
> + PMD_INIT_LOG(ERR, "Set link up is not supported "
> + "by device id 0x%x", hw->device_id);
> return -ENOTSUP;
> }
> #endif
> @@ -1610,8 +1609,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
> return 0;
> }
>
> - PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
> - hw->device_id);
> + PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
> + hw->device_id);
> return -ENOTSUP;
> }
>
> @@ -1627,10 +1626,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
> #ifdef RTE_NIC_BYPASS
> if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
> /* Not suported in bypass mode */
> - PMD_INIT_LOG(ERR,
> - "\nSet link down is not supported "
> - "by device id 0x%x\n",
> - hw->device_id);
> + PMD_INIT_LOG(ERR, "Set link down is not supported "
> + "by device id 0x%x", hw->device_id);
> return -ENOTSUP;
> }
> #endif
> @@ -1639,9 +1636,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
> return 0;
> }
>
> - PMD_INIT_LOG(ERR,
> - "\nSet link down is not supported by device id 0x%x\n",
> - hw->device_id);
> + PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
> + hw->device_id);
> return -ENOTSUP;
> }
>
> @@ -2179,7 +2175,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
> struct rte_eth_link link;
> int intr_enable_delay = false;
>
> - PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
> + PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
>
> if (intr->flags & IXGBE_FLAG_MAILBOX) {
> ixgbe_pf_mbx_process(dev);
> @@ -2209,7 +2205,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
>
> if (intr_enable_delay) {
> if (rte_eal_alarm_set(timeout * 1000,
> - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
> + ixgbe_dev_interrupt_delayed_handler,
> + (void *)dev) < 0)
> PMD_DRV_LOG(ERR, "Error setting alarm");
> } else {
> PMD_DRV_LOG(DEBUG, "enable intr immediately");
> @@ -2256,7 +2253,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
> _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
> }
>
> - PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
> + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
> ixgbe_enable_intr(dev);
> rte_intr_enable(&(dev->pci_dev->intr_handle));
> }
> @@ -2370,7 +2367,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
> if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
> return -ENOTSUP;
> rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
> - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
> + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
>
> /*
> * At least reserve one Ethernet frame for watermark
> @@ -2379,8 +2376,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
> max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
> if ((fc_conf->high_water > max_high_water) ||
> (fc_conf->high_water < fc_conf->low_water)) {
> - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
> - PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
> + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
> + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
> return (-EINVAL);
> }
>
> @@ -2412,7 +2409,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
> return 0;
> }
>
> - PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
> + PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
> return -EIO;
> }
>
> @@ -2442,13 +2439,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
> if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
> /* High/Low water can not be 0 */
> if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
> - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
> + PMD_INIT_LOG(ERR, "Invalid water mark configuration");
> ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
> goto out;
> }
>
> if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
> - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
> + PMD_INIT_LOG(ERR, "Invalid water mark configuration");
> ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
> goto out;
> }
> @@ -2592,7 +2589,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
> ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
> tc_num = map[pfc_conf->priority];
> rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
> - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
> + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
> /*
> * At least reserve one Ethernet frame for watermark
> * high_water/low_water in kilo bytes for ixgbe
> @@ -2600,8 +2597,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
> max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
> if ((pfc_conf->fc.high_water > max_high_water) ||
> (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
> - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
> - PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
> + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
> + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
> return (-EINVAL);
> }
>
> @@ -2617,7 +2614,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
> if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
> return 0;
>
> - PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
> + PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
> return -EIO;
> }
>
> @@ -2764,7 +2761,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> static void
> ixgbevf_intr_disable(struct ixgbe_hw *hw)
> {
> - PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
> + PMD_INIT_FUNC_TRACE();
>
> /* Clear interrupt mask to stop from interrupts being generated */
> IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
> @@ -2777,8 +2774,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
> {
> struct rte_eth_conf* conf = &dev->data->dev_conf;
>
> - PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
> - dev->data->port_id);
> + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
> + dev->data->port_id);
>
> /*
> * VF has no ability to enable/disable HW CRC
> @@ -2786,12 +2783,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
> */
> #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
> if (!conf->rxmode.hw_strip_crc) {
> - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
> + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
> conf->rxmode.hw_strip_crc = 1;
> }
> #else
> if (conf->rxmode.hw_strip_crc) {
> - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
> + PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
> conf->rxmode.hw_strip_crc = 0;
> }
> #endif
> @@ -2806,7 +2803,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
> IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> int err, mask = 0;
>
> - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
> + PMD_INIT_FUNC_TRACE();
>
> hw->mac.ops.reset_hw(hw);
>
> @@ -2818,7 +2815,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
> /* This can fail when allocating mbufs for descriptor rings */
> err = ixgbevf_dev_rx_init(dev);
> if (err) {
> - PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
> + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
> ixgbe_dev_clear_queues(dev);
> return err;
> }
> @@ -2841,7 +2838,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
> {
> struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>
> - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
> + PMD_INIT_FUNC_TRACE();
>
> hw->adapter_stopped = TRUE;
> ixgbe_stop_adapter(hw);
> @@ -2860,7 +2857,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
> {
> struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>
> - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
> + PMD_INIT_FUNC_TRACE();
>
> ixgbe_reset_hw(hw);
>
> @@ -2969,7 +2966,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
> /* we only need to do this if VMDq is enabled */
> reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
> if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
> - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
> + PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
> return (-1);
> }
>
> @@ -3098,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
>
> if (hw->mac.type == ixgbe_mac_82598EB) {
> PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
> - " on 82599 hardware and newer\n");
> + " on 82599 hardware and newer");
> return (-ENOTSUP);
> }
> if (ixgbe_vmdq_mode_check(hw) < 0)
> @@ -3513,8 +3510,7 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
> continue;
> diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
> if (diag != 0)
> - PMD_DRV_LOG(ERR,
> - "Adding again MAC address "
> + PMD_DRV_LOG(ERR, "Adding again MAC address "
> "%02x:%02x:%02x:%02x:%02x:%02x failed "
> "diag=%d",
> mac_addr->addr_bytes[0],
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
> index 6c0a530..8819aac 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_fdir.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
> @@ -112,7 +112,7 @@ static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
> }
>
> if (i >= IXGBE_FDIR_INIT_DONE_POLL)
> - PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n");
> + PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
> }
>
> /*
> @@ -381,7 +381,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
> fdirhashcmd |= fdirhash;
> IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
>
> - PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
> + PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
> }
>
> /*
> @@ -614,7 +614,7 @@ fdir_set_input_mask_82599(struct ixgbe_hw *hw,
> /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
> fdirm |= IXGBE_FDIRM_L4P;
> if (input_mask->dst_port_mask || input_mask->src_port_mask) {
> - PMD_INIT_LOG(ERR, " Error on src/dst port mask\n");
> + PMD_INIT_LOG(ERR, " Error on src/dst port mask");
> return -EINVAL;
> }
> }
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index 46962bc..981df60 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -490,8 +490,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
> desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
> if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
> {
> - PMD_TX_FREE_LOG(DEBUG,
> - "TX descriptor %4u is not done"
> + PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done"
> "(port=%d queue=%d)",
> desc_to_clean_to,
> txq->port_id, txq->queue_id);
> @@ -507,8 +506,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
> nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
> last_desc_cleaned);
>
> - PMD_TX_FREE_LOG(DEBUG,
> - "Cleaning %4u TX descriptors: %4u to %4u "
> + PMD_TX_FREE_LOG(DEBUG, "Cleaning %4u TX descriptors: %4u to %4u "
> "(port=%d queue=%d)",
> nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
> txq->port_id, txq->queue_id);
> @@ -614,7 +612,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
>
> PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
> - " tx_first=%u tx_last=%u\n",
> + " tx_first=%u tx_last=%u",
> (unsigned) txq->port_id,
> (unsigned) txq->queue_id,
> (unsigned) pkt_len,
> @@ -627,8 +625,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> * nb_used better be less than or equal to txq->tx_rs_thresh
> */
> if (nb_used > txq->nb_tx_free) {
> - PMD_TX_FREE_LOG(DEBUG,
> - "Not enough free TX descriptors "
> + PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
> "nb_used=%4u nb_free=%4u "
> "(port=%d queue=%d)",
> nb_used, txq->nb_tx_free,
> @@ -1066,7 +1063,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> if (ixgbe_rx_alloc_bufs(rxq) != 0) {
> int i, j;
> PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
> - "queue_id=%u\n", (unsigned) rxq->port_id,
> + "queue_id=%u", (unsigned) rxq->port_id,
> (unsigned) rxq->queue_id);
>
>
> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
> @@ -1193,7 +1190,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> * frames to its peer(s).
> */
> PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
> - "ext_err_stat=0x%08x pkt_len=%u\n",
> + "ext_err_stat=0x%08x pkt_len=%u",
> (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
> (unsigned) rx_id, (unsigned) staterr,
> (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
> @@ -1201,7 +1198,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> nmb = rte_rxmbuf_alloc(rxq->mb_pool);
> if (nmb == NULL) {
> PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
> - "queue_id=%u\n", (unsigned) rxq->port_id,
> + "queue_id=%u", (unsigned) rxq->port_id,
> (unsigned) rxq->queue_id);
>
> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
> break;
> @@ -1296,7 +1293,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
> if (nb_hold > rxq->rx_free_thresh) {
> PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
> - "nb_hold=%u nb_rx=%u\n",
> + "nb_hold=%u nb_rx=%u",
> (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
> (unsigned) rx_id, (unsigned) nb_hold,
> (unsigned) nb_rx);
> @@ -1383,8 +1380,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> * to happen by sending specific "back-pressure" flow control
> * frames to its peer(s).
> */
> - PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
> - "staterr=0x%x data_len=%u\n",
> + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
> + "staterr=0x%x data_len=%u",
> (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
> (unsigned) rx_id, (unsigned) staterr,
> (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
> @@ -1392,7 +1389,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> nmb = rte_rxmbuf_alloc(rxq->mb_pool);
> if (nmb == NULL) {
> PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
> - "queue_id=%u\n", (unsigned) rxq->port_id,
> + "queue_id=%u", (unsigned) rxq->port_id,
> (unsigned) rxq->queue_id);
>
> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
> break;
> @@ -1561,7 +1558,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
> if (nb_hold > rxq->rx_free_thresh) {
> PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
> - "nb_hold=%u nb_rx=%u\n",
> + "nb_hold=%u nb_rx=%u",
> (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
> (unsigned) rx_id, (unsigned) nb_hold,
> (unsigned) nb_rx);
> @@ -1767,7 +1764,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> if (tx_rs_thresh >= (nb_desc - 2)) {
> PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
> "of TX descriptors minus 2. (tx_rs_thresh=%u "
> - "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
> + "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
> (int)dev->data->port_id, (int)queue_idx);
> return -(EINVAL);
> }
> @@ -1775,7 +1772,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
> "tx_free_thresh must be less than the number of "
> "TX descriptors minus 3. (tx_free_thresh=%u "
> - "port=%d queue=%d)\n",
> + "port=%d queue=%d)",
> (unsigned int)tx_free_thresh,
> (int)dev->data->port_id, (int)queue_idx);
> return -(EINVAL);
> @@ -1783,7 +1780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> if (tx_rs_thresh > tx_free_thresh) {
> PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
> "tx_free_thresh. (tx_free_thresh=%u "
> - "tx_rs_thresh=%u port=%d queue=%d)\n",
> + "tx_rs_thresh=%u port=%d queue=%d)",
> (unsigned int)tx_free_thresh,
> (unsigned int)tx_rs_thresh,
> (int)dev->data->port_id,
> @@ -1793,7 +1790,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> if ((nb_desc % tx_rs_thresh) != 0) {
> PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
> "number of TX descriptors. (tx_rs_thresh=%u "
> - "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
> + "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
> (int)dev->data->port_id, (int)queue_idx);
> return -(EINVAL);
> }
> @@ -1807,7 +1804,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
> PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
> "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
> - "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
> + "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
> (int)dev->data->port_id, (int)queue_idx);
> return -(EINVAL);
> }
> @@ -1873,26 +1870,32 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> ixgbe_tx_queue_release(txq);
> return (-ENOMEM);
> }
> - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
> + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
> txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
>
> /* Use a simple Tx queue (no offloads, no multi segs) if possible */
> if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
> (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
> - PMD_INIT_LOG(INFO, "Using simple tx code path\n");
> + PMD_INIT_LOG(INFO, "Using simple tx code path");
> #ifdef RTE_IXGBE_INC_VECTOR
> if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
> ixgbe_txq_vec_setup(txq, socket_id) == 0) {
> - PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
> + PMD_INIT_LOG(INFO, "Vector tx enabled.");
> dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
> }
> else
> #endif
> dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
> } else {
> - PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
> - PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n", (long unsigned)txq->txq_flags, (long unsigned)IXGBE_SIMPLE_FLAGS);
> - PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n", (long unsigned)txq->tx_rs_thresh, (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
> + PMD_INIT_LOG(INFO, "Using full-featured tx code path");
> + PMD_INIT_LOG(INFO, " - txq_flags = %lx "
> + "[IXGBE_SIMPLE_FLAGS=%lx]",
> + (long unsigned)txq->txq_flags,
> + (long unsigned)IXGBE_SIMPLE_FLAGS);
> + PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
> + "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
> + (long unsigned)txq->tx_rs_thresh,
> + (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
> dev->tx_pkt_burst = ixgbe_xmit_pkts;
> }
>
> @@ -2152,7 +2155,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> ixgbe_rx_queue_release(rxq);
> return (-ENOMEM);
> }
> - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
> + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
> rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
>
> /*
> @@ -2166,13 +2169,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
> PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> "satisfied. Rx Burst Bulk Alloc function will be "
> - "used on port=%d, queue=%d.\n",
> + "used on port=%d, queue=%d.",
> rxq->port_id, rxq->queue_id);
> dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
> #ifdef RTE_IXGBE_INC_VECTOR
> if (!ixgbe_rx_vec_condition_check(dev)) {
> PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
> - "sure RX burst size no less than 32.\n");
> + "sure RX burst size no less than 32.");
> ixgbe_rxq_vec_setup(rxq, socket_id);
> dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
> }
> @@ -2182,7 +2185,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
> "are not satisfied, Scattered Rx is requested, "
> "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
> - "enabled (port=%d, queue=%d).\n",
> + "enabled (port=%d, queue=%d).",
> rxq->port_id, rxq->queue_id);
> }
> dev->data->rx_queues[queue_idx] = rxq;
> @@ -2201,7 +2204,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> uint32_t desc = 0;
>
> if (rx_queue_id >= dev->data->nb_rx_queues) {
> - PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
> + PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
> return 0;
> }
>
> @@ -2917,7 +2920,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
> ixgbe_dcb_rx_hw_config(hw, dcb_config);
> break;
> default:
> - PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
> + PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
> break;
> }
> switch (dev->data->dev_conf.txmode.mq_mode) {
> @@ -2939,7 +2942,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
> ixgbe_dcb_tx_hw_config(hw, dcb_config);
> break;
> default:
> - PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
> + PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
> break;
> }
>
> @@ -3210,7 +3213,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
> volatile union ixgbe_adv_rx_desc *rxd;
> struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
> if (mbuf == NULL) {
> - PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
> + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
> (unsigned) rxq->queue_id);
> return (-ENOMEM);
> }
> @@ -3282,7 +3285,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
> IXGBE_WRITE_REG(hw, IXGBE_MRQC,
> IXGBE_MRQC_VMDQRT8TCEN);
> break;
> default:
> - PMD_INIT_LOG(ERR, "invalid pool number in IOV mode\n");
> + PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
> }
> }
>
> @@ -3335,7 +3338,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
> break;
> default:
> mtqc = IXGBE_MTQC_64Q_1PB;
> - PMD_INIT_LOG(ERR, "invalid pool number in IOV mode\n");
> + PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
> }
> IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
> }
> @@ -3603,7 +3606,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
> if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
> if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
> IXGBE_SUCCESS) {
> - PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
> + PMD_INIT_LOG(ERR, "Could not enable loopback mode");
> /* ignore error */
> return;
> }
> @@ -3699,7 +3702,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> /* Allocate buffers for descriptor rings */
> if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
> PMD_INIT_LOG(ERR,
> - "Could not alloc mbuf for queue:%d\n",
> + "Could not alloc mbuf for queue:%d",
> rx_queue_id);
> return -1;
> }
> @@ -3715,7 +3718,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not enable "
> - "Rx Queue %d\n", rx_queue_id);
> + "Rx Queue %d", rx_queue_id);
> rte_wmb();
> IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
> IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
> @@ -3754,7 +3757,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not disable "
> - "Rx Queue %d\n", rx_queue_id);
> + "Rx Queue %d", rx_queue_id);
>
> rte_delay_us(RTE_IXGBE_WAIT_100_US);
>
> @@ -3797,7 +3800,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not enable "
> - "Tx Queue %d\n", tx_queue_id);
> + "Tx Queue %d", tx_queue_id);
> }
> rte_wmb();
> IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
> @@ -3838,7 +3841,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> } while (--poll_ms && (txtdh != txtdt));
> if (!poll_ms)
> PMD_INIT_LOG(ERR,
> - "Tx Queue %d is not empty when stopping.\n",
> + "Tx Queue %d is not empty when stopping.",
> tx_queue_id);
> }
>
> @@ -3856,7 +3859,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not disable "
> - "Tx Queue %d\n", tx_queue_id);
> + "Tx Queue %d", tx_queue_id);
> }
>
> if (txq->ops != NULL) {
> @@ -4073,7 +4076,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
> } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not enable "
> - "Tx Queue %d\n", i);
> + "Tx Queue %d", i);
> }
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>
> @@ -4091,7 +4094,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
> } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
> if (!poll_ms)
> PMD_INIT_LOG(ERR, "Could not enable "
> - "Rx Queue %d\n", i);
> + "Rx Queue %d", i);
> rte_wmb();
> IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
>
> --
> 1.7.10.4
>
>
Reviewed-By: Jay Rolette <rolette@infiniteio.com>
"are not satisfied, Scattered Rx is requested, "
"or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
- "enabled (port=%d, queue=%d).\n",
+ "enabled (port=%d, queue=%d).",
rxq->port_id, rxq->queue_id);
}
dev->data->rx_queues[queue_idx] = rxq;
@@ -2201,7 +2204,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+ PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
return 0;
}
@@ -2917,7 +2920,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_rx_hw_config(hw, dcb_config);
break;
default:
- PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
break;
}
switch (dev->data->dev_conf.txmode.mq_mode) {
@@ -2939,7 +2942,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_tx_hw_config(hw, dcb_config);
break;
default:
- PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
break;
}
@@ -3210,7 +3213,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
volatile union ixgbe_adv_rx_desc *rxd;
struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
if (mbuf == NULL) {
- PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
return (-ENOMEM);
}
@@ -3282,7 +3285,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
break;
default:
- PMD_INIT_LOG(ERR, "invalid pool number in IOV mode\n");
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
}
}
@@ -3335,7 +3338,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
break;
default:
mtqc = IXGBE_MTQC_64Q_1PB;
- PMD_INIT_LOG(ERR, "invalid pool number in IOV mode\n");
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
}
@@ -3603,7 +3606,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
IXGBE_SUCCESS) {
- PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+ PMD_INIT_LOG(ERR, "Could not enable loopback mode");
/* ignore error */
return;
}
@@ -3699,7 +3702,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/* Allocate buffers for descriptor rings */
if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
PMD_INIT_LOG(ERR,
- "Could not alloc mbuf for queue:%d\n",
+ "Could not alloc mbuf for queue:%d",
rx_queue_id);
return -1;
}
@@ -3715,7 +3718,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Rx Queue %d\n", rx_queue_id);
+ "Rx Queue %d", rx_queue_id);
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
@@ -3754,7 +3757,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable "
- "Rx Queue %d\n", rx_queue_id);
+ "Rx Queue %d", rx_queue_id);
rte_delay_us(RTE_IXGBE_WAIT_100_US);
@@ -3797,7 +3800,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d\n", tx_queue_id);
+ "Tx Queue %d", tx_queue_id);
}
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
@@ -3838,7 +3841,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
PMD_INIT_LOG(ERR,
- "Tx Queue %d is not empty when stopping.\n",
+ "Tx Queue %d is not empty when stopping.",
tx_queue_id);
}
@@ -3856,7 +3859,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d\n", tx_queue_id);
+ "Tx Queue %d", tx_queue_id);
}
if (txq->ops != NULL) {
@@ -4073,7 +4076,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d\n", i);
+ "Tx Queue %d", i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4091,7 +4094,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Rx Queue %d\n", i);
+ "Rx Queue %d", i);
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);