[v1] net/ice: calculate TCP header size for offload
Checks
Commit Message
The ice needs the exact TCP header size including options for TCP
checksum offload, but according to PKT_TX_TCP_CKSUM note, l4_len
is not required to be set, so it needs to calculate the TCP header
size if not set.
Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")
Cc: stable@dpdk.org
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/ice/ice_rxtx.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
Comments
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Wednesday, July 29, 2020 3:51 PM
> To: dev@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: Wang, Yinan <yinan.wang@intel.com>; Xu, Ting <ting.xu@intel.com>;
> Wang, Haiyue <haiyue.wang@intel.com>; stable@dpdk.org
> Subject: [PATCH v1] net/ice: calculate TCP header size for offload
>
> The ice needs the exact TCP header size including options for TCP checksum
> offload, but according to PKT_TX_TCP_CKSUM note, l4_len is not required to be
> set, so it needs to calculate the TCP header size if not set.
>
> Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Wednesday, July 29, 2020 15:51
> To: dev@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: Wang, Yinan <yinan.wang@intel.com>; Xu, Ting <ting.xu@intel.com>;
> Wang, Haiyue <haiyue.wang@intel.com>; stable@dpdk.org
> Subject: [PATCH v1] net/ice: calculate TCP header size for offload
>
> The ice needs the exact TCP header size including options for TCP checksum
> offload, but according to PKT_TX_TCP_CKSUM note, l4_len is not required to
> be set, so it needs to calculate the TCP header size if not set.
>
> Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> ---
> drivers/net/ice/ice_rxtx.c | 27 +++++++++++++++++++++++++++
> 1 file changed, 27 insertions(+)
>
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> ddf6a93fb2..bcb67ec251 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -2371,6 +2371,28 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
> return count;
> }
>
> +/* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
> +static inline uint16_t ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt,
> +union ice_tx_offload tx_offload) {
> + uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
> + const struct rte_tcp_hdr *tcp_hdr;
> + struct rte_tcp_hdr _tcp_hdr;
> +
> + if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
> + tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct
> rte_tcp_hdr *,
> + tcpoff);
> +
Is the blank line here unnecessary?
> + return (tcp_hdr->data_off & 0xf0) >> 2;
> + }
> +
> + tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr),
> &_tcp_hdr);
> + if (tcp_hdr)
> + return (tcp_hdr->data_off & 0xf0) >> 2;
> + else
> + return 0;
> +}
> +
> uint16_t
> ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t
> nb_pkts) { @@ -2468,6 +2490,11 @@ ice_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts, uint16_t nb_pkts)
>
> /* Enable checksum offloading */
> if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
> + if ((ol_flags & PKT_TX_L4_MASK) ==
> PKT_TX_TCP_CKSUM &&
> + !tx_offload.l4_len)
> + tx_offload.l4_len =
> + ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
> +
> ice_txd_enable_checksum(ol_flags, &td_cmd,
> &td_offset, tx_offload);
> }
> --
> 2.28.0
Acked-by: Qiming Yang <qiming.yang@intel.com>
@@ -2371,6 +2371,28 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
return count;
}
+/* Calculate TCP header length for PKT_TX_TCP_CKSUM if not provided */
+static inline uint16_t
+ice_calc_pkt_tcp_hdr(struct rte_mbuf *tx_pkt, union ice_tx_offload tx_offload)
+{
+ uint16_t tcpoff = tx_offload.l2_len + tx_offload.l3_len;
+ const struct rte_tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr _tcp_hdr;
+
+ if (tcpoff + sizeof(struct rte_tcp_hdr) < tx_pkt->data_len) {
+ tcp_hdr = rte_pktmbuf_mtod_offset(tx_pkt, struct rte_tcp_hdr *,
+ tcpoff);
+
+ return (tcp_hdr->data_off & 0xf0) >> 2;
+ }
+
+ tcp_hdr = rte_pktmbuf_read(tx_pkt, tcpoff, sizeof(_tcp_hdr), &_tcp_hdr);
+ if (tcp_hdr)
+ return (tcp_hdr->data_off & 0xf0) >> 2;
+ else
+ return 0;
+}
+
uint16_t
ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -2468,6 +2490,11 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Enable checksum offloading */
if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {
+ if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM &&
+ !tx_offload.l4_len)
+ tx_offload.l4_len =
+ ice_calc_pkt_tcp_hdr(tx_pkt, tx_offload);
+
ice_txd_enable_checksum(ol_flags, &td_cmd,
&td_offset, tx_offload);
}