[v2,21/71] net/txgbe: replace use of fixed size rte_memcpy

Message ID 20240301171707.95242-22-stephen@networkplumber.org (mailing list archive)
State Superseded
Delegated to: Thomas Monjalon
Series: replace use of fixed size rte_memcpy

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Stephen Hemminger March 1, 2024, 5:15 p.m. UTC
  Automatically generated by devtools/cocci/rte_memcpy.cocci
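
  The semantic patch itself is not included in this message; as a rough
  sketch, the transformation it applies is of the form below (the
  metavariable names are illustrative, and the actual script may constrain
  the size argument differently, e.g. to sizeof() expressions):

  @@
  expression dst, src;
  constant size;
  @@
  -rte_memcpy(dst, src, size)
  +memcpy(dst, src, size)

  The usual motivation for this kind of cleanup is that with a
  compile-time-constant size the compiler can inline and bounds-check a
  plain memcpy() call.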

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/txgbe/txgbe_ethdev.c |  9 ++---
 drivers/net/txgbe/txgbe_fdir.c   |  6 +--
 drivers/net/txgbe/txgbe_flow.c   | 65 ++++++++++++++------------------
 drivers/net/txgbe/txgbe_ipsec.c  |  8 ++--
 drivers/net/txgbe/txgbe_pf.c     |  4 +-
 drivers/net/txgbe/txgbe_tm.c     | 11 +++---
 6 files changed, 46 insertions(+), 57 deletions(-)
  

Patch

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e8898e2d4..1c42fd74b47d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4304,9 +4304,8 @@  txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
 				sizeof(struct txgbe_5tuple_filter), 0);
 		if (filter == NULL)
 			return -ENOMEM;
-		rte_memcpy(&filter->filter_info,
-				 &filter_5tuple,
-				 sizeof(struct txgbe_5tuple_filter_info));
+		memcpy(&filter->filter_info, &filter_5tuple,
+		       sizeof(struct txgbe_5tuple_filter_info));
 		filter->queue = ntuple_filter->queue;
 		ret = txgbe_add_5tuple_filter(dev, filter);
 		if (ret < 0) {
@@ -5109,9 +5108,7 @@  txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 		if (!node)
 			return -ENOMEM;
 
-		rte_memcpy(&node->key,
-				 &key,
-				 sizeof(struct txgbe_l2_tn_key));
+		memcpy(&node->key, &key, sizeof(struct txgbe_l2_tn_key));
 		node->pool = l2_tunnel->pool;
 		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
 		if (ret < 0) {
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index a198b6781bad..00366ed87323 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -42,7 +42,7 @@ 
 		else \
 			ipv6_addr[i] = 0; \
 	} \
-	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+	memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
 } while (0)
 
 /**
@@ -858,8 +858,8 @@  txgbe_fdir_filter_program(struct rte_eth_dev *dev,
 				   sizeof(struct txgbe_fdir_filter), 0);
 		if (!node)
 			return -ENOMEM;
-		rte_memcpy(&node->input, &rule->input,
-			   sizeof(struct txgbe_atr_input));
+		memcpy(&node->input, &rule->input,
+		       sizeof(struct txgbe_atr_input));
 		node->fdirflags = rule->fdirflags;
 		node->fdirhash = fdirhash;
 		node->queue = queue;
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7ef52d0b0fcd..c76fc0eed0e0 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1834,10 +1834,10 @@  txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
 		if (item->spec) {
 			rule->b_spec = TRUE;
 			ipv6_spec = item->spec;
-			rte_memcpy(rule->input.src_ip,
-				   ipv6_spec->hdr.src_addr, 16);
-			rte_memcpy(rule->input.dst_ip,
-				   ipv6_spec->hdr.dst_addr, 16);
+			memcpy(rule->input.src_ip, ipv6_spec->hdr.src_addr,
+			       16);
+			memcpy(rule->input.dst_ip, ipv6_spec->hdr.dst_addr,
+			       16);
 		}
 
 		/**
@@ -2756,9 +2756,9 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&ntuple_filter_ptr->filter_info,
-				&ntuple_filter,
-				sizeof(struct rte_eth_ntuple_filter));
+			memcpy(&ntuple_filter_ptr->filter_info,
+			       &ntuple_filter,
+			       sizeof(struct rte_eth_ntuple_filter));
 			TAILQ_INSERT_TAIL(&filter_ntuple_list,
 				ntuple_filter_ptr, entries);
 			flow->rule = ntuple_filter_ptr;
@@ -2782,9 +2782,9 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&ethertype_filter_ptr->filter_info,
-				&ethertype_filter,
-				sizeof(struct rte_eth_ethertype_filter));
+			memcpy(&ethertype_filter_ptr->filter_info,
+			       &ethertype_filter,
+			       sizeof(struct rte_eth_ethertype_filter));
 			TAILQ_INSERT_TAIL(&filter_ethertype_list,
 				ethertype_filter_ptr, entries);
 			flow->rule = ethertype_filter_ptr;
@@ -2806,9 +2806,8 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&syn_filter_ptr->filter_info,
-				&syn_filter,
-				sizeof(struct rte_eth_syn_filter));
+			memcpy(&syn_filter_ptr->filter_info, &syn_filter,
+			       sizeof(struct rte_eth_syn_filter));
 			TAILQ_INSERT_TAIL(&filter_syn_list,
 				syn_filter_ptr,
 				entries);
@@ -2827,9 +2826,8 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 		if (fdir_rule.b_mask) {
 			if (!fdir_info->mask_added) {
 				/* It's the first time the mask is set. */
-				rte_memcpy(&fdir_info->mask,
-					&fdir_rule.mask,
-					sizeof(struct txgbe_hw_fdir_mask));
+				memcpy(&fdir_info->mask, &fdir_rule.mask,
+				       sizeof(struct txgbe_hw_fdir_mask));
 				fdir_info->flex_bytes_offset =
 					fdir_rule.flex_bytes_offset;
 
@@ -2873,9 +2871,9 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 						"failed to allocate memory");
 					goto out;
 				}
-				rte_memcpy(&fdir_rule_ptr->filter_info,
-					&fdir_rule,
-					sizeof(struct txgbe_fdir_rule));
+				memcpy(&fdir_rule_ptr->filter_info,
+				       &fdir_rule,
+				       sizeof(struct txgbe_fdir_rule));
 				TAILQ_INSERT_TAIL(&filter_fdir_list,
 					fdir_rule_ptr, entries);
 				flow->rule = fdir_rule_ptr;
@@ -2910,9 +2908,8 @@  txgbe_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&l2_tn_filter_ptr->filter_info,
-				&l2_tn_filter,
-				sizeof(struct txgbe_l2_tunnel_conf));
+			memcpy(&l2_tn_filter_ptr->filter_info, &l2_tn_filter,
+			       sizeof(struct txgbe_l2_tunnel_conf));
 			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
 				l2_tn_filter_ptr, entries);
 			flow->rule = l2_tn_filter_ptr;
@@ -3038,9 +3035,8 @@  txgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_NTUPLE:
 		ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
 					pmd_flow->rule;
-		rte_memcpy(&ntuple_filter,
-			&ntuple_filter_ptr->filter_info,
-			sizeof(struct rte_eth_ntuple_filter));
+		memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
+		       sizeof(struct rte_eth_ntuple_filter));
 		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
 		if (!ret) {
 			TAILQ_REMOVE(&filter_ntuple_list,
@@ -3051,9 +3047,8 @@  txgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_ETHERTYPE:
 		ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
 					pmd_flow->rule;
-		rte_memcpy(&ethertype_filter,
-			&ethertype_filter_ptr->filter_info,
-			sizeof(struct rte_eth_ethertype_filter));
+		memcpy(&ethertype_filter, &ethertype_filter_ptr->filter_info,
+		       sizeof(struct rte_eth_ethertype_filter));
 		ret = txgbe_add_del_ethertype_filter(dev,
 				&ethertype_filter, FALSE);
 		if (!ret) {
@@ -3065,9 +3060,8 @@  txgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_SYN:
 		syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
 				pmd_flow->rule;
-		rte_memcpy(&syn_filter,
-			&syn_filter_ptr->filter_info,
-			sizeof(struct rte_eth_syn_filter));
+		memcpy(&syn_filter, &syn_filter_ptr->filter_info,
+		       sizeof(struct rte_eth_syn_filter));
 		ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
 		if (!ret) {
 			TAILQ_REMOVE(&filter_syn_list,
@@ -3077,9 +3071,8 @@  txgbe_flow_destroy(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_FDIR:
 		fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
-		rte_memcpy(&fdir_rule,
-			&fdir_rule_ptr->filter_info,
-			sizeof(struct txgbe_fdir_rule));
+		memcpy(&fdir_rule, &fdir_rule_ptr->filter_info,
+		       sizeof(struct txgbe_fdir_rule));
 		ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
 		if (!ret) {
 			TAILQ_REMOVE(&filter_fdir_list,
@@ -3092,8 +3085,8 @@  txgbe_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_L2_TUNNEL:
 		l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
 				pmd_flow->rule;
-		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
-			sizeof(struct txgbe_l2_tunnel_conf));
+		memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+		       sizeof(struct txgbe_l2_tunnel_conf));
 		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
 		if (!ret) {
 			TAILQ_REMOVE(&filter_l2_tunnel_list,
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index f9f8108fb894..000dd5ec6d39 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -658,10 +658,10 @@  txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
 			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
 			ic_session->src_ip.type = IPv6;
 			ic_session->dst_ip.type = IPv6;
-			rte_memcpy(ic_session->src_ip.ipv6,
-				   ipv6->hdr.src_addr, 16);
-			rte_memcpy(ic_session->dst_ip.ipv6,
-				   ipv6->hdr.dst_addr, 16);
+			memcpy(ic_session->src_ip.ipv6, ipv6->hdr.src_addr,
+			       16);
+			memcpy(ic_session->dst_ip.ipv6, ipv6->hdr.dst_addr,
+			       16);
 		} else {
 			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
 			ic_session->src_ip.type = IPv4;
diff --git a/drivers/net/txgbe/txgbe_pf.c b/drivers/net/txgbe/txgbe_pf.c
index 176f79005cde..a0f5a67d9c78 100644
--- a/drivers/net/txgbe/txgbe_pf.c
+++ b/drivers/net/txgbe/txgbe_pf.c
@@ -435,7 +435,7 @@  txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
-	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
+	memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
 	/*
 	 * Piggyback the multicast filter type so VF can compute the
 	 * correct vectors
@@ -457,7 +457,7 @@  txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
 	struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
 
 	if (rte_is_valid_assigned_ether_addr(ea)) {
-		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+		memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
 		return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
 	}
 	return -1;
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 3171be73d05d..7d77b01dfef2 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -280,8 +280,8 @@  txgbe_shaper_profile_add(struct rte_eth_dev *dev,
 	if (!shaper_profile)
 		return -ENOMEM;
 	shaper_profile->shaper_profile_id = shaper_profile_id;
-	rte_memcpy(&shaper_profile->profile, profile,
-			 sizeof(struct rte_tm_shaper_params));
+	memcpy(&shaper_profile->profile, profile,
+	       sizeof(struct rte_tm_shaper_params));
 	TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
 			  shaper_profile, node);
 
@@ -625,8 +625,8 @@  txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 		tm_node->no = 0;
 		tm_node->parent = NULL;
 		tm_node->shaper_profile = shaper_profile;
-		rte_memcpy(&tm_node->params, params,
-				 sizeof(struct rte_tm_node_params));
+		memcpy(&tm_node->params, params,
+		       sizeof(struct rte_tm_node_params));
 		tm_conf->root = tm_node;
 
 		/* increase the reference counter of the shaper profile */
@@ -706,8 +706,7 @@  txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
 	tm_node->reference_count = 0;
 	tm_node->parent = parent_node;
 	tm_node->shaper_profile = shaper_profile;
-	rte_memcpy(&tm_node->params, params,
-			 sizeof(struct rte_tm_node_params));
+	memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
 	if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
 		tm_node->no = parent_node->reference_count;
 		TAILQ_INSERT_TAIL(&tm_conf->tc_list,