[v5] net/ice: add support for low Rx latency
Commit Message
This patch adds a devarg parameter to enable/disable low Rx latency.
Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
---
v3: rebase to dpdk-next-net-intel
v4: rename arg rx-low-latency to rx_low_latency
v5: rename macro ICE_RX_LOW_LATENCY to ICE_RX_LOW_LATENCY_ARG
---
doc/guides/nics/ice.rst | 12 ++++++++++++
drivers/net/ice/ice_ethdev.c | 26 +++++++++++++++++++++++---
drivers/net/ice/ice_ethdev.h | 1 +
3 files changed, 36 insertions(+), 3 deletions(-)
Comments
> -----Original Message-----
> From: Zhang, AlvinX <alvinx.zhang@intel.com>
> Sent: Friday, September 24, 2021 5:34 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>
> Subject: [PATCH v5] net/ice: add support for low Rx latency
>
> This patch adds a devarg parameter to enable/disable low Rx latency.
>
> Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
doc/guides/nics/ice.rst
@@ -227,6 +227,18 @@ Runtime Config Options
-a af:00.0,pps_out='[pin:0]'
+- ``Low Rx latency`` (default ``0``)
+
+ vRAN workloads require a low-latency DPDK interface for the fronthaul
+ connection to the radio. Setting the ``rx_low_latency`` parameter to ``1``
+ causes each completed Rx descriptor to be written to host memory immediately,
+ reducing the Rx interrupt latency to 2 us::
+
+ -a 0000:88:00.0,rx_low_latency=1
+
+ As a trade-off, this configuration may degrade packet processing performance
+ due to PCI bandwidth limitations.
+
Driver compilation and testing
------------------------------
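The devarg is consumed when the device is probed, so an application only needs to include it in the allow-list string handed to the EAL. Below is a minimal sketch of that, assuming a hand-built argument vector; the PCI address, application name, and argv layout are illustrative and not part of the patch:

#include <rte_eal.h>
#include <rte_errno.h>

int
main(void)
{
	/* Allow-list the ice port and enable low Rx latency via devargs. */
	char *eal_args[] = {
		"app",
		"-a", "0000:88:00.0,rx_low_latency=1",
	};
	int nargs = sizeof(eal_args) / sizeof(eal_args[0]);

	/* rte_eal_init() parses the devargs string and caches it for probe. */
	if (rte_eal_init(nargs, eal_args) < 0)
		return -rte_errno;

	/* ... regular ethdev configuration and Rx/Tx queue setup follows ... */

	return rte_eal_cleanup();
}

The same string works unchanged with dpdk-testpmd or any other DPDK application, since the parameter is parsed by the PMD itself rather than by the application.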
drivers/net/ice/ice_ethdev.c
@@ -30,6 +30,7 @@
#define ICE_PROTO_XTR_ARG "proto_xtr"
#define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask"
#define ICE_ONE_PPS_OUT_ARG "pps_out"
+#define ICE_RX_LOW_LATENCY_ARG "rx_low_latency"
static const char * const ice_valid_args[] = {
ICE_SAFE_MODE_SUPPORT_ARG,
@@ -37,6 +38,7 @@
ICE_PROTO_XTR_ARG,
ICE_HW_DEBUG_MASK_ARG,
ICE_ONE_PPS_OUT_ARG,
+ ICE_RX_LOW_LATENCY_ARG,
NULL
};
@@ -1956,6 +1958,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
if (ret)
goto bail;
+ ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
+ &parse_bool, &ad->devargs.rx_low_latency);
+
bail:
rte_kvargs_free(kvlist);
return ret;
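For reference, the &parse_bool callback passed to rte_kvargs_process() above is the driver's existing boolean parser and is not part of this diff. A handler compatible with the rte_kvargs callback prototype plausibly looks like the sketch below; the exact error handling in ice_ethdev.c may differ:

#include <errno.h>
#include <stdlib.h>

/*
 * Illustrative rte_kvargs_process() handler with the expected signature
 * int (*)(const char *key, const char *value, void *opaque). It accepts
 * only "0" or "1" and stores the result in the int pointed to by 'args',
 * e.g. &ad->devargs.rx_low_latency.
 */
static int
parse_bool(const char *key, const char *value, void *args)
{
	int *flag = args;
	char *end = NULL;
	long num;

	(void)key;
	errno = 0;
	num = strtol(value, &end, 10);
	if (errno != 0 || end == value || *end != '\0' ||
	    (num != 0 && num != 1))
		return -1;	/* reject anything but "0" or "1" */

	*flag = (int)num;
	return 0;
}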
@@ -3272,8 +3277,9 @@ static int ice_init_rss(struct ice_pf *pf)
{
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint32_t val, val_tx;
- int i;
+ int rx_low_latency, i;
+ rx_low_latency = vsi->adapter->devargs.rx_low_latency;
for (i = 0; i < nb_queue; i++) {
/*do actual bind*/
val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
@@ -3283,8 +3289,21 @@ static int ice_init_rss(struct ice_pf *pf)
PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
base_queue + i, msix_vect);
+
/* set ITR0 value */
- ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+ if (rx_low_latency) {
+ /**
+ * Empirical configuration for optimal real-time
+ * latency: reduce interrupt throttling to 2us.
+ */
+ ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
+ ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
+ QRX_ITR_NO_EXPR_M);
+ } else {
+ ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
+ ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
+ }
+
ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
}
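The two writes per queue are the core of the change: GLINT_ITR programs the per-vector interrupt throttling interval, while QRX_ITR controls per-queue descriptor write-back expiry. A hypothetical helper factoring out this logic could look as follows; the function name is invented, and the comments on register granularity are assumptions inferred from the 2us figure in the documentation, not taken from the hardware spec:

/*
 * Hypothetical refactor of the per-queue ITR programming above.
 * Assumes the usual ice_ethdev.h / base register headers are in scope
 * (struct ice_hw, ICE_WRITE_REG, GLINT_ITR, QRX_ITR, QRX_ITR_NO_EXPR_M).
 */
static void
ice_rxq_cfg_itr(struct ice_hw *hw, uint32_t msix_vect, uint16_t rxq,
		int rx_low_latency)
{
	if (rx_low_latency) {
		/* Assumed 2us ITR granularity: 0x1 throttles to ~2us. */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
		/*
		 * No-expiration bit: completed descriptors are written back
		 * to host memory immediately (see the ice.rst note above).
		 */
		ICE_WRITE_REG(hw, QRX_ITR(rxq), QRX_ITR_NO_EXPR_M);
	} else {
		/* Default throttling, one step coarser than low latency. */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
		ICE_WRITE_REG(hw, QRX_ITR(rxq), 0);
	}
}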
@@ -5497,7 +5516,8 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
ICE_HW_DEBUG_MASK_ARG "=0xXXX"
ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
- ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
+ ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
+ ICE_RX_LOW_LATENCY_ARG "=<0|1>");
RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
drivers/net/ice/ice_ethdev.h
@@ -476,6 +476,7 @@ struct ice_pf {
* Cache devargs parse result.
*/
struct ice_devargs {
+ int rx_low_latency;
int safe_mode_support;
uint8_t proto_xtr_dflt;
int pipe_mode_support;