@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
/* Maximal size of aggregated LRO packet. */
#define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
+/* Maximal number of buffer-split segments per Rx packet: 2^MLX5_MAX_LOG_RQ_SEGS. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
/* LRO configurations structure. */
struct mlx5_lro_config {
uint32_t supported:1; /* Whether LRO is supported. */
@@ -731,12 +731,39 @@
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct rte_eth_rxseg_split *rx_seg =
+ (struct rte_eth_rxseg_split *)conf->rx_seg;
+ struct rte_eth_rxseg_split rx_single = {.mp = mp};
+ uint16_t n_seg = conf->rx_nseg;
int res;
+ if (mp) {
+ /* The parameters should be checked on rte_eth_dev layer. */
+ MLX5_ASSERT(!n_seg);
+ rx_seg = &rx_single;
+ n_seg = 1;
+ } else {
+ MLX5_ASSERT(conf && n_seg && rx_seg);
+ }
+ if (n_seg > 1) {
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
+
+ /* The offloads should be checked on rte_eth_dev layer. */
+ MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+ if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+ DRV_LOG(ERR, "port %u queue index %u split "
+ "offload not configured",
+ dev->data->port_id, idx);
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+ }
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
if (!rxq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
dev->data->port_id, idx);
@@ -1329,11 +1356,11 @@
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+ const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
- unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
unsigned int mprq_stride_nums;
unsigned int mprq_stride_size;
unsigned int mprq_stride_cap;
@@ -1347,7 +1374,7 @@ struct mlx5_rxq_ctrl *
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
- const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
unsigned int max_rx_pkt_len = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1532,7 +1559,7 @@ struct mlx5_rxq_ctrl *
(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
- tmpl->rxq.mp = mp;
+ tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
@@ -94,6 +94,13 @@ enum mlx5_rxq_err_state {
MLX5_RXQ_ERR_STATE_NEED_READY,
};
+struct mlx5_eth_rxseg { /* Rx buffer-split segment; mirrors rte_eth_rxseg_split — confirm copy stays in sync. */
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field, currently unused. */
+};
+
/* RX queue descriptor. */
struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
@@ -153,6 +160,9 @@ struct mlx5_rxq_data {
uint32_t tunnel; /* Tunnel information. */
uint64_t flow_meta_mask;
int32_t flow_meta_offset;
+ uint32_t rxseg_n; /* Number of split segment descriptions. */
+ struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+ /* Buffer split segment descriptions - sizes, offsets, pools. */
} __rte_cache_aligned;
enum mlx5_rxq_type {
@@ -316,7 +326,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp);
+ const struct rte_eth_rxseg_split *rx_seg,
+ uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);