@@ -18,6 +18,7 @@
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_time.h>
+#include <rte_eal_paging.h>
#include "bnxt_cpr.h"
#include "bnxt_util.h"
@@ -119,6 +120,8 @@
(BNXT_CHIP_P5_P7(bp) ? TPA_MAX_SEGS_TH : \
TPA_MAX_SEGS)
+#define BNXT_TPA_MAX_PAGES 65536
+
/*
* Define the number of async completion rings to be used. Set to zero for
* configurations in which the maximum number of packet completion rings
@@ -815,6 +818,7 @@ struct bnxt {
#define BNXT_VNIC_CAP_ESP_SPI6_CAP BIT(12)
#define BNXT_VNIC_CAP_AH_SPI_CAP (BNXT_VNIC_CAP_AH_SPI4_CAP | BNXT_VNIC_CAP_AH_SPI6_CAP)
#define BNXT_VNIC_CAP_ESP_SPI_CAP (BNXT_VNIC_CAP_ESP_SPI4_CAP | BNXT_VNIC_CAP_ESP_SPI6_CAP)
+#define BNXT_VNIC_CAP_VNIC_TUNNEL_TPA BIT(13)
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
@@ -1046,6 +1046,9 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->vnic_cap_flags |= BNXT_VNIC_CAP_ESP_SPI6_CAP;
+ if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP)
+ bp->vnic_cap_flags |= BNXT_VNIC_CAP_VNIC_TUNNEL_TPA;
+
bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
HWRM_UNLOCK();
@@ -2666,6 +2669,30 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
return rc;
}
+#define BNXT_DFLT_TUNL_TPA_BMAP \
+ (HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE | \
+ HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 | \
+ HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_vnic_update_tunl_tpa_bmap(struct bnxt *bp,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ uint32_t tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bp->vxlan_port_cnt)
+ tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
+ HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+
+ if (bp->geneve_port_cnt)
+ tunl_tpa_bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = rte_cpu_to_le_32(tunl_tpa_bmap);
+}
+
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic, bool enable)
{
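With both a VXLAN and a GENEVE UDP port programmed, the helper above requests TPA for all six tunnel types on top of the always-on defaults. A minimal illustration, reusing the same HSI bit macros as the hunk (a sketch, not driver code):

    uint32_t bmap = BNXT_DFLT_TUNL_TPA_BMAP; /* GRE + plain IPv4/IPv6 */

    bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
            HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE; /* vxlan_port_cnt != 0 */
    bmap |= HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE;    /* geneve_port_cnt != 0 */

The bitmap is only added to the request when firmware advertised HW_TUNNEL_TPA_CAP; otherwise the early return leaves the request untouched and firmware keeps its default behavior.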
@@ -2714,6 +2741,29 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
if (BNXT_CHIP_P5_P7(bp))
req.max_aggs = rte_cpu_to_le_16(bp->max_tpa_v2);
+
+ /* For TPA v2, firmware expects the MSS and the max aggregation segment count in log2 units */
+ if (BNXT_CHIP_P7(bp)) {
+ uint32_t nsegs, n, segs = 0;
+ uint16_t mss = bp->eth_dev->data->mtu - 40; /* MSS assuming 40B of IPv4 + TCP headers */
+ size_t page_size = rte_mem_page_size();
+ uint32_t max_mbuf_frags =
+ BNXT_TPA_MAX_PAGES / (page_size + 1);
+
+ /* Calculate the number of segs based on mss */
+ if (mss <= page_size) {
+ n = page_size / mss;
+ nsegs = (max_mbuf_frags - 1) * n;
+ } else {
+ n = mss / page_size;
+ if (mss & (page_size - 1))
+ n++;
+ nsegs = (max_mbuf_frags - n) / n;
+ }
+ segs = rte_log2_u32(nsegs);
+ req.max_agg_segs = rte_cpu_to_le_16(segs);
+ }
+ bnxt_vnic_update_tunl_tpa_bmap(bp, &req);
}
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
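To sanity-check the conversion above with concrete numbers: a standalone sketch assuming a 4 KiB page and a 1500-byte MTU (so mss = 1460), with a local rounded-up log2 standing in for rte_log2_u32(), which also rounds up:

    #include <stdio.h>
    #include <stdint.h>

    #define BNXT_TPA_MAX_PAGES 65536

    /* stand-in for rte_log2_u32(): rounded-up log2, returns 0 for v <= 1 */
    static uint32_t log2_up(uint32_t v)
    {
        uint32_t r = 0;

        while ((1u << r) < v)
            r++;
        return r;
    }

    int main(void)
    {
        uint32_t page_size = 4096;  /* assumed page size */
        uint32_t mss = 1500 - 40;   /* 1460, fits in one page */
        uint32_t max_mbuf_frags = BNXT_TPA_MAX_PAGES / (page_size + 1); /* 15 */
        uint32_t n = page_size / mss;               /* 2 (mss <= page_size branch) */
        uint32_t nsegs = (max_mbuf_frags - 1) * n;  /* 28 */

        printf("max_agg_segs = %u\n", log2_up(nsegs)); /* prints 5 */
        return 0;
    }

So on a 4 KiB-page system the request carries max_agg_segs = 5, i.e. up to 2^5 aggregation segments in the log2 units the comment describes.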
@@ -4242,6 +4292,27 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
return rc;
}
+static int bnxt_hwrm_set_tpa(struct bnxt *bp)
+{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
+ bool tpa_flags;
+ int rc, i;
+
+ tpa_flags = (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) != 0;
+ for (i = 0; i < bp->max_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ continue;
+
+ rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, tpa_flags);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
uint8_t tunnel_type)
{
@@ -4278,6 +4349,8 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
HWRM_UNLOCK();
+ bnxt_hwrm_set_tpa(bp);
+
return rc;
}
@@ -4346,6 +4419,7 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
bp->ecpri_port_cnt = 0;
}
+ bnxt_hwrm_set_tpa(bp);
return rc;
}
@@ -153,7 +153,8 @@ static void bnxt_rx_ring_reset(void *arg)
rxr = rxq->rx_ring;
/* Disable and flush TPA before resetting the RX ring */
if (rxr->tpa_info)
- bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
+ bnxt_vnic_tpa_cfg(bp, rxq->queue_id, false);
+
rc = bnxt_hwrm_rx_ring_reset(bp, i);
if (rc) {
PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
@@ -163,12 +164,13 @@ static void bnxt_rx_ring_reset(void *arg)
bnxt_rx_queue_release_mbufs(rxq);
rxr->rx_raw_prod = 0;
rxr->ag_raw_prod = 0;
+ rxr->ag_cons = 0;
rxr->rx_next_cons = 0;
bnxt_init_one_rx_ring(rxq);
bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
if (rxr->tpa_info)
- bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
+ bnxt_vnic_tpa_cfg(bp, rxq->queue_id, true);
rxq->in_reset = 0;
}
@@ -1151,7 +1153,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
return -EBUSY;
if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
- cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
+ cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2 ||
+ cmp_type == RX_TPA_START_V3_CMPL_TYPE_RX_TPA_START_V3) {
bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
(struct rx_tpa_start_cmpl_hi *)rxcmp1);
rc = -EINVAL; /* Continue w/o new mbuf */
@@ -464,7 +464,9 @@ bnxt_vnic_queue_delete(struct bnxt *bp, uint16_t vnic_idx)
static struct bnxt_vnic_info*
bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
struct bnxt_vnic_info *vnic;
struct bnxt_rx_queue *rxq = NULL;
int32_t rc = -EINVAL;
@@ -523,6 +525,12 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
goto cleanup;
}
+ rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) != 0);
+ if (rc)
+ PMD_DRV_LOG(DEBUG, "Failed to configure TPA on vnic for queue %d\n", q_index);
+
rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc) {
PMD_DRV_LOG(DEBUG, "Failed to configure vnic plcmode %d\n",
@@ -658,7 +666,9 @@ bnxt_vnic_rss_create(struct bnxt *bp,
struct bnxt_vnic_rss_info *rss_info,
uint16_t vnic_id)
{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
struct bnxt_vnic_info *vnic;
struct bnxt_rx_queue *rxq = NULL;
uint32_t idx, nr_ctxs, config_rss = 0;
@@ -741,6 +751,12 @@ bnxt_vnic_rss_create(struct bnxt *bp,
goto fail_cleanup;
}
+ rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
+ (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) != 0);
+ if (rc)
+ PMD_DRV_LOG(DEBUG, "Failed to configure TPA on vnic %d\n", idx);
+
rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc) {
PMD_DRV_LOG(ERR, "Failed to configure vnic plcmode %d\n",
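Both VNIC-creation paths now key TPA off RTE_ETH_RX_OFFLOAD_TCP_LRO, so aggregation follows whatever the application negotiated at configure time. For completeness, an application-side sketch (queue counts illustrative):

    #include <rte_ethdev.h>

    static int configure_with_lro(uint16_t port_id)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf = { 0 };
        int rc;

        rc = rte_eth_dev_info_get(port_id, &dev_info);
        if (rc != 0)
            return rc;

        /* Request TCP LRO only when the PMD reports support for it */
        if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
            conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

        return rte_eth_dev_configure(port_id, 1 /* rxq */, 1 /* txq */, &conf);
    }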