@@ -9,7 +9,7 @@
#include <compat.h>
#include <fsl_qbman_base.h>
-/* Sanity check */
+/* Byte order check */
#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
(__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
#error "Unknown endianness!"
@@ -528,7 +528,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
node->flags &= ~NIX_TM_NODE_ENABLED;
}
- /* Verify sanity of all tx queues */
+ /* Verify all tx queues */
for (i = 0; i < sq_cnt; i++) {
sq = nix->sqs[i];
if (!sq)
@@ -458,7 +458,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
char name[PLT_MEMZONE_NAMESIZE];
const struct plt_memzone *mz;
- /* Sanity check */
+ /* Argument checks */
if (!lf || !block_size || !block_count || !pool || !aura ||
!aura_handle)
return NPA_ERR_PARAM;
@@ -622,7 +622,7 @@ npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
{
int rc, aura_id;
- /* Sanity check */
+ /* Parameter check */
if (!lf || !aura || !aura_handle)
return NPA_ERR_PARAM;
@@ -1096,7 +1096,7 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
uint8_t aura_sz;
int rc;
- /* Sanity checks */
+ /* Input checks */
if (!lf || !base || !mbox)
return NPA_ERR_PARAM;
@@ -107,7 +107,7 @@ extern enum rta_sec_era rta_sec_era;
/*
* ONE - should always be set. Combination of ONE (always
- * set) and ZRO (always clear) forms an endianness sanity check
+ * set) and ZRO (always clear) forms an endianness check
*/
#define HDR_ONE BIT(23)
#define HDR_ZRO BIT(15)
@@ -235,7 +235,7 @@ mlx5_get_ifname_sysfs(const char *ibdev_path, char *ifname)
/**
* Suffix RTE_EAL_PMD_PATH with "-glue".
*
- * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * This function validates RTE_EAL_PMD_PATH before
* suffixing its last component.
*
* @param buf[out]
@@ -595,13 +595,13 @@ tlv_update_partition_len_and_cks(
/*
* We just modified the partition, so the total length may not be
- * valid. Don't use tlv_find(), which performs some sanity checks
+ * valid. Don't use tlv_find(), which performs some checks
* that may fail here.
*/
partition.data = cursor->block;
memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
header = (struct tlv_partition_header *)partition.data;
- /* Sanity check. */
+ /* Header check. */
if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
rc = EFAULT;
goto fail1;
@@ -930,7 +930,7 @@ efx_rx_qcreate_internal(
ndescs, id, flags, eep, erp)) != 0)
goto fail4;
- /* Sanity check queue creation result */
+ /* Check queue creation result */
if (flags & EFX_RXQ_FLAG_RSS_HASH) {
const efx_rx_prefix_layout_t *erplp = &erp->er_prefix_layout;
const efx_rx_prefix_field_info_t *rss_hash_field;
@@ -317,7 +317,7 @@ bcmfs4_mdst_desc(uint64_t addr, unsigned int length_div_16)
}
static bool
-bcmfs4_sanity_check(struct bcmfs_qp_message *msg)
+bcmfs4_message_check(struct bcmfs_qp_message *msg)
{
unsigned int i = 0;
@@ -458,8 +458,8 @@ bcmfs4_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
struct bcmfs_queue *txq = &qp->tx_q;
struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;
- /* Do sanity check on message */
- if (!bcmfs4_sanity_check(msg)) {
+ /* Check the message */
+ if (!bcmfs4_message_check(msg)) {
BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
return -EIO;
}
@@ -293,7 +293,7 @@ bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)
}
static bool
-bcmfs5_sanity_check(struct bcmfs_qp_message *msg)
+bcmfs5_message_check(struct bcmfs_qp_message *msg)
{
unsigned int i = 0;
@@ -389,8 +389,8 @@ bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
struct bcmfs_queue *txq = &qp->tx_q;
struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;
- /* Do sanity check on message */
- if (!bcmfs5_sanity_check(msg)) {
+ /* Check the message */
+ if (!bcmfs5_message_check(msg)) {
BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
return -EIO;
}
@@ -205,7 +205,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
goto err;
}
- /* sanity check device status */
+ /* check device status */
if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
/* need function-level-reset (FLR) or is enabled */
IDXD_PMD_ERR("Device status is not disabled, cannot init");
@@ -241,7 +241,7 @@ opdl_queue_setup(struct rte_eventdev *dev,
struct opdl_evdev *device = opdl_pmd_priv(dev);
- /* Extra sanity check, probably not needed */
+ /* Extra check, probably not needed */
if (queue_id == OPDL_INVALID_QID) {
PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
"Invalid queue id %u requested\n",
@@ -300,7 +300,7 @@ static int opdl_add_deps(struct opdl_evdev *device,
struct opdl_queue *queue_deps = &device->queue[deps_q_id];
struct opdl_stage *dep_stages[OPDL_PORTS_MAX];
- /* sanity check that all stages are for same opdl ring */
+ /* check that all stages are for same opdl ring */
for (i = 0; i < queue->nb_ports; i++) {
struct opdl_ring *r =
opdl_stage_get_opdl_ring(stage_for_port(queue, i));
@@ -349,10 +349,10 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ARK_PMD_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n",
rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
- /* If HW sanity test fails, return an error */
+ /* If HW test fails, return an error */
if (ark->sysctrl.t32[4] != 0xcafef00d) {
ARK_PMD_LOG(ERR,
- "HW Sanity test has failed, expected constant"
+ "HW test has failed, expected constant"
" 0x%x, read 0x%x (%s)\n",
0xcafef00d,
ark->sysctrl.t32[4], __func__);
@@ -360,7 +360,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
}
ARK_PMD_LOG(DEBUG,
- "HW Sanity test has PASSED, expected constant"
+ "HW test has PASSED, expected constant"
" 0x%x, read 0x%x (%s)\n",
0xcafef00d, ark->sysctrl.t32[4], __func__);
@@ -590,7 +590,7 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
rte_thread_t thread;
/* Delay packet generator start allow the hardware to be ready
- * This is only used for sanity checking with internal generator
+ * This is only used for testing with the internal generator
*/
char tname[RTE_THREAD_INTERNAL_NAME_SIZE];
snprintf(tname, sizeof(tname), "ark-pg%d", dev->data->port_id);
@@ -283,7 +283,7 @@ eth_ark_recv_pkts(void *rx_queue,
mbuf->pkt_len = meta->pkt_len;
mbuf->data_len = meta->pkt_len;
- if (ARK_DEBUG_CORE) { /* debug sanity checks */
+ if (ARK_DEBUG_CORE) { /* debug checks */
if ((meta->pkt_len > (1024 * 16)) ||
(meta->pkt_len == 0)) {
@@ -7820,7 +7820,7 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
uint32_t ovlan2;
uint8_t i, j;
- /* various MF mode sanity checks... */
+ /* various MF mode checks... */
if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
PMD_DRV_LOG(NOTICE, sc,
@@ -181,7 +181,7 @@ bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
*/
return;
}
- /* sanity */
+ /* input validation */
if (!sc->port.pmf || !sc->port.port_stx) {
PMD_DRV_LOG(ERR, sc, "BUG!");
return;
@@ -231,7 +231,7 @@ bnx2x_port_stats_init(struct bnx2x_softc *sc)
uint32_t mac_addr;
uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
- /* sanity */
+ /* input validation */
if (!sc->link_vars.link_up || !sc->port.pmf) {
PMD_DRV_LOG(ERR, sc, "BUG!");
return;
@@ -457,7 +457,7 @@ bnx2x_func_stats_init(struct bnx2x_softc *sc)
struct dmae_command *dmae = &sc->stats_dmae;
uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
- /* sanity */
+ /* input validation */
if (!sc->func_stx) {
PMD_DRV_LOG(ERR, sc, "BUG!");
return;
@@ -1280,7 +1280,7 @@ bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
struct dmae_command *dmae;
uint32_t *stats_comp = BNX2X_SP(sc, stats_comp);
- /* sanity */
+ /* input validation */
if (!sc->port.pmf || !sc->port.port_stx) {
PMD_DRV_LOG(ERR, sc, "BUG!");
return;
@@ -207,7 +207,7 @@ static int ecore_exe_queue_step(struct bnx2x_softc *sc,
break;
}
- /* Sanity check */
+ /* Basic check */
if (!cur_len)
return ECORE_SUCCESS;
@@ -1088,7 +1088,7 @@ static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin,
{
uint32_t epio_mask, gp_oenable;
*en = 0;
- /* Sanity check */
+ /* Input check */
if (epio_pin > 31) {
ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to get", epio_pin);
return;
@@ -1105,7 +1105,7 @@ static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t e
{
uint32_t epio_mask, gp_output, gp_oenable;
- /* Sanity check */
+ /* Input check */
if (epio_pin > 31) {
ELINK_DEBUG_P1(sc, "Invalid EPIO pin %d to set", epio_pin);
return;
@@ -538,7 +538,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
i == timeout - 1);
- /* Sanity check on the resp->resp_len */
+ /* Check the resp->resp_len */
rte_io_rmb();
if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
/* Last byte of resp contains the valid key */
@@ -56770,7 +56770,7 @@ struct hwrm_tfc_ident_alloc_output {
* Requests the firmware to free a TFC resource identifier.
* A resource subtype and session id are passed in.
* An identifier (previously allocated) corresponding to all these is
- * freed, only after various sanity checks are completed.
+ * freed, only after various checks are completed.
*/
/* hwrm_tfc_ident_free_input (size:192b/24B) */
struct hwrm_tfc_ident_free_input {
@@ -1473,7 +1473,7 @@ bond_8023ad_setup_validate(uint16_t port_id,
return -EINVAL;
if (conf != NULL) {
- /* Basic sanity check */
+ /* Basic input check */
if (conf->slow_periodic_ms == 0 ||
conf->fast_periodic_ms >= conf->slow_periodic_ms ||
conf->long_timeout_ms == 0 ||
@@ -643,7 +643,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
struct rte_mempool *lpb_pool = mp;
struct rte_mempool *spb_pool = NULL;
- /* Sanity checks */
+ /* Input checks */
if (rx_conf->rx_deferred_start == 1) {
plt_err("Deferred Rx start is not supported");
goto fail;
@@ -1212,7 +1212,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
rc = -EINVAL;
- /* Sanity checks */
+ /* Basic checks */
if (rte_eal_has_hugepages() == 0) {
plt_err("Huge page is not configured");
goto fail_configure;
@@ -525,10 +525,7 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
eth_dev->data->tx_queues[queue_idx] = (void *)txq;
- /* Sanity Checking
- *
- * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
- */
+ /* nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE */
temp_nb_desc = nb_desc;
if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
@@ -649,10 +646,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
- /* Sanity Checking
- *
- * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
- */
+ /* nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE */
temp_nb_desc = nb_desc;
if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
@@ -137,7 +137,7 @@ static int adap_init0vf(struct adapter *adapter)
/*
* Grab our Virtual Interface resource allocation, extract the
- * features that we're interested in and do a bit of sanity testing on
+ * features that we're interested in and do a bit of testing on
* what we discover.
*/
err = t4vf_get_vfres(adapter);
@@ -148,7 +148,7 @@ static int adap_init0vf(struct adapter *adapter)
}
/*
- * Check for various parameter sanity issues.
+ * Check for various parameter issues.
*/
if (adapter->params.vfres.pmask == 0) {
dev_err(adapter->pdev_dev, "no port access configured\n"
@@ -1758,7 +1758,7 @@ mempool_element_size_valid(struct rte_mempool *mp)
/* account for up to 512B of alignment */
min_size -= FM10K_RX_DATABUF_ALIGN;
- /* sanity check for overflow */
+ /* check for overflow */
if (min_size > mp->elt_size)
return 0;
@@ -646,7 +646,7 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (q->nb_free < mb->nb_segs)
break;
- /* sanity check to make sure the mbuf is valid */
+ /* check to make sure the mbuf is valid */
if ((mb->nb_segs == 0) ||
((mb->nb_segs > 1) && (mb->next == NULL)))
break;
@@ -1130,7 +1130,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
return -ENOTSUP;
/*
- * Sanity check for x550.
+ * Extra check for x550.
* When adding a new filter with flow type set to IPv4,
* the flow director mask should be configed before,
* and the L4 protocol and ports are masked.
@@ -615,7 +615,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
rx_offloads = dev->data->dev_conf.rxmode.offloads;
tx_offloads = dev->data->dev_conf.txmode.offloads;
- /* sanity checks */
+ /* input validation */
if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
@@ -4983,7 +4983,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
uint32_t rdrxctl;
uint32_t rfctl;
- /* Sanity check */
+ /* Offload check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
@@ -1216,7 +1216,7 @@ static struct rte_pci_driver mlx4_driver = {
/**
* Suffix RTE_EAL_PMD_PATH with "-glue".
*
- * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * This function validates RTE_EAL_PMD_PATH before
* suffixing its last component.
*
* @param buf[out]
@@ -514,7 +514,7 @@ mlx4_flow_merge_tcp(struct rte_flow *flow,
}
/**
- * Perform basic sanity checks on a pattern item.
+ * Perform basic checks on a pattern item.
*
* @param[in] item
* Item specification.
@@ -730,7 +730,7 @@ mlx4_flow_prepare(struct mlx4_priv *priv,
goto exit_item_not_supported;
proc = next;
/*
- * Perform basic sanity checks only once, while handle is
+ * Perform basic checks only once, while handle is
* not allocated.
*/
if (flow == &temp) {
@@ -799,7 +799,7 @@ mlx4_flow_prepare(struct mlx4_priv *priv,
rss_key = mlx4_rss_hash_key_default;
rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
}
- /* Sanity checks. */
+ /* Input checks. */
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
ETH_DEV(priv)->data->nb_rx_queues)
@@ -9212,7 +9212,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
#ifdef RTE_LIBRTE_MLX5_DEBUG
/**
- * Sanity check for match mask and value. Similar to check_valid_spec() in
+ * Check for match mask and value. Similar to check_valid_spec() in
* kernel driver. If unmasked bit is present in value, it returns failure.
*
* @param match_mask
@@ -9536,11 +9536,10 @@ flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t mask = priv->sh->dv_regc0_mask;
- /* Mask is verified during device initialization. Sanity checking here. */
+ /* Mask is verified during device initialization. */
MLX5_ASSERT(mask != 0);
/*
* Availability of sufficient number of bits in REG_C_0 is verified on initialization.
- * Sanity checking here.
*/
MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
return mask;
@@ -9552,12 +9551,11 @@ flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t tag;
- /* Mask is verified during device initialization. Sanity checking here. */
+ /* Mask is verified during device initialization. */
MLX5_ASSERT(priv->vport_meta_mask != 0);
tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
/*
* Availability of sufficient number of bits in REG_C_0 is verified on initialization.
- * Sanity checking here.
*/
MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
return tag;
@@ -1442,7 +1442,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
desc *= 4;
obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
/*
- * rte_mempool_create_empty() has sanity check to refuse large cache
+ * rte_mempool_create_empty() has a basic check to refuse a large cache
* size compared to the number of elements.
* CALC_CACHE_FLUSHTHRESH() is defined in a C file, so using a
* constant number 2 instead.
@@ -21,7 +21,7 @@
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
/*
- * Compile time sanity check for vectorized functions.
+ * Compile time checks for vectorized functions.
*/
#define S_ASSERT_RTE_MBUF(s) \
@@ -260,7 +260,7 @@ get_entry_values(const char *entry, uint8_t *tab,
if (nb_rng_tokens != 2)
return -3;
- /* Range and sanity checks. */
+ /* Range and input checks. */
if (get_val_securely(rng_tokens[0], &token_val) < 0)
return -4;
beg = (char)token_val;
@@ -2732,7 +2732,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
break;
}
- /* Perform basic sanity checks */
+ /* Perform basic checks */
ret = nfp_flow_item_check(item, proc);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item %d check failed", item->type);
@@ -527,7 +527,7 @@ nfp_net_flow_compile_items(const struct rte_flow_item items[],
break;
}
- /* Perform basic sanity checks */
+ /* Perform basic checks */
ret = nfp_net_flow_item_check(item, proc);
if (ret != 0) {
PMD_DRV_LOG(ERR, "NFP flow item %d check failed", item->type);
@@ -1383,7 +1383,7 @@ qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
cqe->len_list[0]);
/* Update total length and frags based on end TPA */
rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
- /* TODO: Add Sanity Checks */
+ /* TODO: Add validity checks */
rx_mb->nb_segs = cqe->num_of_bds;
rx_mb->pkt_len = cqe->total_packet_len;
@@ -2211,7 +2211,7 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
static inline void
-qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
+qede_mpls_tunn_tx_basic_check(struct rte_mbuf *mbuf,
struct qede_tx_queue *txq)
{
if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
@@ -2473,7 +2473,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
mplsoudp_flg = true;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- qede_mpls_tunn_tx_sanity_check(mbuf, txq);
+ qede_mpls_tunn_tx_basic_check(mbuf, txq);
#endif
/* Outer L4 offset in two byte words */
tunn_l4_hdr_start_offset =
@@ -34,7 +34,7 @@ struct ring_internal_args {
struct rte_ring * const *tx_queues;
const unsigned int nb_tx_queues;
const unsigned int numa_node;
- void *addr; /* self addr for sanity check */
+ void *addr; /* self addr for verification */
};
enum dev_action {
@@ -612,10 +612,7 @@ static int parse_kvlist(const char *key __rte_unused,
*action = '\0';
action++;
- /*
- * Need to do some sanity checking here
- */
-
+ /* Check that the action argument is valid */
if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
info->list[info->count].action = DEV_ATTACH;
else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
@@ -59,7 +59,7 @@ struct sfc_dp_rx_qcreate_info {
/**
* Maximum number of Rx descriptors completed in one Rx event.
- * Just for sanity checks if datapath would like to do.
+ * Provided only for checks the datapath may wish to do.
*/
unsigned int batch_max;
@@ -595,7 +595,7 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
rx_offloads = dev->data->dev_conf.rxmode.offloads;
tx_offloads = dev->data->dev_conf.txmode.offloads;
- /* sanity checks */
+ /* offload checks */
if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
@@ -4181,7 +4181,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
uint32_t rdrxctl;
uint32_t rfctl;
- /* Sanity check */
+ /* Offload check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;