@@ -770,6 +770,11 @@ enum bnxt_session_type {
BNXT_SESSION_TYPE_LAST
};
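+/* Rx buffer split capabilities reported via rte_eth_dev_info.rx_seg_capa */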
+#define BNXT_MAX_BUFFER_SPLIT_SEGS 2
+#define BNXT_MULTI_POOL_BUF_SPLIT_CAP 1
+#define BNXT_BUF_SPLIT_OFFSET_CAP 1
+#define BNXT_BUF_SPLIT_ALIGN_CAP 0
+
struct bnxt {
void *bar0;
@@ -1268,6 +1268,11 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
+ dev_info->rx_seg_capa.max_nseg = BNXT_MAX_BUFFER_SPLIT_SEGS;
+ dev_info->rx_seg_capa.multi_pools = BNXT_MULTI_POOL_BUF_SPLIT_CAP;
+ dev_info->rx_seg_capa.offset_allowed = BNXT_BUF_SPLIT_OFFSET_CAP;
+ dev_info->rx_seg_capa.offset_align_log2 = BNXT_BUF_SPLIT_ALIGN_CAP;
+
dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE;
return 0;
@@ -3041,10 +3041,14 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
- int rc = 0;
- struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
+ uint8_t rs = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
+ uint32_t flags, enables;
uint16_t size;
+ int rc = 0;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
@@ -3052,19 +3056,26 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
}
HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
-
- req.flags = rte_cpu_to_le_32(
- HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
-
- req.enables = rte_cpu_to_le_32(
- HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
+ flags = HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT;
+ enables = HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID;
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;
size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
-
req.jumbo_thresh = rte_cpu_to_le_16(size);
+
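+ /* Enable IPv4/IPv6 header-data split placement when the buffer split
+  * offload is requested and an HDS threshold is configured.
+  */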
+ if (rs && vnic->hds_threshold) {
+ flags |=
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
+ HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6;
+ req.hds_threshold = rte_cpu_to_le_16(vnic->hds_threshold);
+ enables |=
+ HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID;
+ }
+
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.flags = rte_cpu_to_le_32(flags);
+ req.enables = rte_cpu_to_le_32(enables);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -29,7 +29,8 @@ uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp)
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_KEEP_CRC |
RTE_ETH_RX_OFFLOAD_SCATTER |
- RTE_ETH_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
/* In P7 platform if truflow is enabled then vlan offload is disabled*/
if (!(BNXT_TRUFLOW_EN(bp) && BNXT_CHIP_P7(bp)))
@@ -332,8 +333,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ uint8_t rs = !!(rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT);
+ struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_eth_rxseg_split *rx_seg =
+ (struct rte_eth_rxseg_split *)rx_conf->rx_seg;
+ uint16_t n_seg = rx_conf->rx_nseg;
struct bnxt_rx_queue *rxq;
int rc = 0;
@@ -341,6 +346,17 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
if (rc)
return rc;
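+ /* Buffer split sanity checks: the segment count must be consistent
+  * with the offload flag and must not exceed the supported maximum.
+  */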
+ if (n_seg > 1 && !rs) {
+ PMD_DRV_LOG_LINE(ERR, "n_seg %d requires the buffer split offload to be enabled (%d)",
+ n_seg, rs);
+ return -EINVAL;
+ }
+
+ if (n_seg > BNXT_MAX_BUFFER_SPLIT_SEGS) {
+ PMD_DRV_LOG_LINE(ERR, "n_seg %d not supported", n_seg);
+ return -EINVAL;
+ }
+
if (queue_idx >= bnxt_max_rings(bp)) {
PMD_DRV_LOG_LINE(ERR,
"Cannot create Rx ring %d. Only %d rings available",
@@ -365,7 +381,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
return -ENOMEM;
}
rxq->bp = bp;
- rxq->mb_pool = mp;
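+ /* With buffer split, the first segment's pool backs the Rx (header) ring
+  * and the second segment's pool backs the aggregation (payload) ring.
+  */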
+ if (n_seg > 1) {
+ rxq->mb_pool = rx_seg[BNXT_MEM_POOL_IDX_0].mp;
+ rxq->agg_mb_pool = rx_seg[BNXT_MEM_POOL_IDX_1].mp;
+ } else {
+ rxq->mb_pool = mp;
+ rxq->agg_mb_pool = mp;
+ }
+
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh =
RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);
@@ -411,6 +434,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->rx_started = rxq->rx_deferred_start ? false : true;
rxq->vnic = bnxt_get_default_vnic(bp);
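+ /* Clear the VNIC HDS threshold when buffer split is not configured */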
+ rxq->vnic->hds_threshold = n_seg ? rxq->vnic->hds_threshold : 0;
return 0;
err:
@@ -12,11 +12,15 @@
/* Drop by default when receive desc is not available. */
#define BNXT_DEFAULT_RX_DROP_EN 1
+#define BNXT_MEM_POOL_IDX_0 0
+#define BNXT_MEM_POOL_IDX_1 1
+
struct bnxt;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
+ struct rte_mempool *agg_mb_pool; /* mbuf pool for AGG ring */
uint64_t mbuf_initializer; /* val to init mbuf */
uint16_t nb_rx_desc; /* num of RX desc */
uint16_t rx_free_thresh; /* max free RX desc to hold */
@@ -84,7 +84,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
return -EINVAL;
}
- mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ mbuf = __bnxt_alloc_rx_data(rxq->agg_mb_pool);
if (!mbuf) {
rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
rte_memory_order_relaxed);
@@ -1673,7 +1673,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < max_aggs; i++) {
if (unlikely(!rxr->tpa_info[i].mbuf)) {
rxr->tpa_info[i].mbuf =
- __bnxt_alloc_rx_data(rxq->mb_pool);
+ __bnxt_alloc_rx_data(rxq->agg_mb_pool);
if (!rxr->tpa_info[i].mbuf) {
rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
rte_memory_order_relaxed);
@@ -84,6 +84,7 @@ struct bnxt_vnic_info {
enum rte_eth_hash_function hash_f;
enum rte_eth_hash_function hash_f_local;
uint64_t rss_types_local;
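+ /* header length threshold for header-data split placement */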
+ uint16_t hds_threshold;
uint8_t metadata_format;
uint8_t state;
};
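
For context, a minimal application-side sketch of how the buffer split path added above would be exercised through the ethdev API. The helper name, the 128-byte header segment length and the two mempools are illustrative assumptions, not part of this patch, and most error handling is omitted.

#include <string.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/*
 * Sketch only: set up Rx queue 0 of @port_id with a 2-segment buffer split
 * so packet headers land in @hdr_pool and the remaining payload in
 * @payload_pool. RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is assumed to have been
 * enabled in dev_conf.rxmode.offloads at rte_eth_dev_configure() time.
 */
static int
setup_buffer_split_queue(uint16_t port_id, uint16_t nb_rxd,
			 struct rte_mempool *hdr_pool,
			 struct rte_mempool *payload_pool)
{
	union rte_eth_rxseg rx_segs[2];
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int rc;

	rc = rte_eth_dev_info_get(port_id, &dev_info);
	if (rc != 0)
		return rc;
	rxconf = dev_info.default_rxconf;

	memset(rx_segs, 0, sizeof(rx_segs));
	rx_segs[0].split.mp = hdr_pool;     /* first segment: packet headers */
	rx_segs[0].split.length = 128;      /* assumed header buffer length */
	rx_segs[1].split.mp = payload_pool; /* second segment: rest of packet */

	rxconf.rx_seg = rx_segs;
	rxconf.rx_nseg = RTE_DIM(rx_segs);

	/* The mempool argument must be NULL when rx_seg/rx_nseg are used. */
	return rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}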