net/mlx5: add support for transport mode ESP ptype
Commit Message
Implement matching on RTE_PTYPE_L4_ESP and RTE_PTYPE_INNER_L4_ESP
for IP Encapsulating Security Payload (ESP) in transport mode.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 33 +++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 2 ++
2 files changed, 35 insertions(+)
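For reference, the following is a hedged sketch (not part of the patch) of how an application could request this match through the generic PTYPE flow item. It assumes a DPDK release where RTE_PTYPE_L4_ESP is defined; the attribute and action choices are illustrative only.

/* Sketch: match ESP carried directly over IP (transport mode) via the PTYPE item.
 * Assumption: RTE_PTYPE_L4_ESP is available in rte_mbuf_ptype.h; attributes and
 * actions below are placeholders, not taken from this patch. */
#include <rte_flow.h>
#include <rte_mbuf_ptype.h>

static const struct rte_flow_item_ptype esp_spec = {
	.packet_type = RTE_PTYPE_L4_ESP,	/* outer ESP, no tunnel header */
};
static const struct rte_flow_item_ptype esp_mask = {
	.packet_type = RTE_PTYPE_L4_MASK,	/* compare only the outer L4 bits */
};

static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_PTYPE, .spec = &esp_spec, .mask = &esp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Create the rule on a port; error handling kept minimal for the sketch. */
static struct rte_flow *
create_esp_transport_rule(uint16_t port_id)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

Matching ESP behind a tunnel would use RTE_PTYPE_INNER_L4_ESP together with RTE_PTYPE_INNER_L4_MASK instead.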
Comments
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Tuesday, October 22, 2024 20:07
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>; Hamdan
> Agbariya <hamdani@nvidia.com>; Alex Vesker <valex@nvidia.com>; Dariusz
> Sosnowski <dsosnowski@nvidia.com>; Ori Kam <orika@nvidia.com>; Bing Zhao
> <bingz@nvidia.com>; Suanming Mou <suanmingm@nvidia.com>;
> david.marchand@redhat.com; nithind1988@gmail.com
> Subject: [PATCH] net/mlx5: add support for transport mode ESP ptype
>
> Implement matching on RTE_PTYPE_L4_ESP and RTE_PTYPE_INNER_L4_ESP for IP
> Encapsulating Security Payload (ESP) in transport mode.
>
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Best regards,
Dariusz Sosnowski
> > Implement matching on RTE_PTYPE_L4_ESP and RTE_PTYPE_INNER_L4_ESP for IP
> > Encapsulating Security Payload (ESP) in transport mode.
> >
> > Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
>
> Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Applied, thanks.
Patch
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -345,6 +345,27 @@ mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
 			       (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
 	uint8_t l4_type = STE_NO_L4;
 
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+		l4_type = STE_TCP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+		l4_type = STE_UDP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP))
+		l4_type = STE_ESP;
+
+	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc,
+				const void *item_spec,
+				uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+			       (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+	uint8_t l4_type = STE_NO_L4;
+
 	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
 		l4_type = STE_TCP;
 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
@@ -1937,6 +1958,12 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
 			fc->item_idx = item_idx;
 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+
+			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)];
+			fc->item_idx = item_idx;
+			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
 			DR_CALC_SET(fc, eth_l2, l4_type, false);
 		}
 	}
@@ -1953,6 +1980,12 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
 			fc->item_idx = item_idx;
 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			DR_CALC_SET(fc, eth_l2, l4_type_bwc, true);
+
+			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)];
+			fc->item_idx = item_idx;
+			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
 			DR_CALC_SET(fc, eth_l2, l4_type, true);
 		}
 	}
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -171,6 +171,8 @@ enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
 	MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
 	MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I,
 	MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
 	MLX5DR_DEFINER_FNAME_PTYPE_FRAG_O,
 	MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I,