[v3] ip_frag: add IPv4 fragment copy packet API
Commit Message
Some NIC drivers support the MBUF_FAST_FREE offload (device supports
optimization for fast release of mbufs; when set, the application must
guarantee that, per queue, all mbufs come from the same mempool and have
refcnt = 1). In order to adapt to this offload, add this API, which
produces all output fragments by copy from a single mempool. Also add
test cases for the new API.
Signed-off-by: Huichao Cai <chcchc88@163.com>
---
app/test/test_ipfrag.c | 8 +-
lib/ip_frag/rte_ip_frag.h | 27 +++++
lib/ip_frag/rte_ipv4_fragmentation.c | 208 +++++++++++++++++++++++++++++++++++
lib/ip_frag/version.map | 1 +
4 files changed, 243 insertions(+), 1 deletion(-)
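For context before the discussion, a minimal usage sketch of the proposed API (not part of the patch): it fragments into the same mempool the input mbuf was allocated from, which is what keeps the result MBUF_FAST_FREE-friendly; note the patch's parameter checks require pool_direct == pkt_in->pool. BURST, mtu, port_id, queue_id and pkt_pool are placeholders.

#include <rte_ethdev.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>

#define BURST 32	/* placeholder fragment-array size */

static void
tx_fragmented(struct rte_mbuf *pkt, struct rte_mempool *pkt_pool,
	uint16_t mtu, uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *frags[BURST];

	/* copy-fragment into pkt_pool; pkt must come from pkt_pool too */
	int32_t n = rte_ipv4_fragment_copy_packet(pkt, frags, BURST,
			mtu, pkt_pool);

	rte_pktmbuf_free(pkt);	/* input is not consumed by the API */
	if (n < 0)
		return;		/* n is (-1) * errno */

	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, frags,
			(uint16_t)n);
	while (sent < (uint16_t)n)
		rte_pktmbuf_free(frags[sent++]);
}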
Comments
> From: Huichao Cai [mailto:chcchc88@163.com]
> Sent: Friday, 22 July 2022 15.02
> To: dev@dpdk.org
> Cc: konstantin.v.ananyev@yandex.ru
> Subject: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
>
> Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
> for fast release of mbufs. When set application must guarantee that
> per-queue all mbufs comes from the same mempool and has refcnt = 1)
> offload. In order to adapt to this offload function, add this API.
> Add some test data for this API.
>
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
> app/test/test_ipfrag.c | 8 +-
> lib/ip_frag/rte_ip_frag.h | 27 +++++
> lib/ip_frag/rte_ipv4_fragmentation.c | 208 +++++++++++++++++++++++++++++++++++
> lib/ip_frag/version.map | 1 +
> 4 files changed, 243 insertions(+), 1 deletion(-)
>
> diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
> index ba0ffd0..bb7c4d3 100644
> --- a/app/test/test_ipfrag.c
> +++ b/app/test/test_ipfrag.c
> @@ -418,10 +418,16 @@ static void ut_teardown(void)
> }
>
> 	if (tests[i].ipv == 4)
> -		len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> +		if (i % 2)
> +			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> 				tests[i].mtu_size,
> 				direct_pool,
> 				indirect_pool);
> +		else
> +			len = rte_ipv4_fragment_copy_packet(b, pkts_out,
> +				BURST,
> +				tests[i].mtu_size,
> +				pkt_pool);
> 	else if (tests[i].ipv == 6)
> 		len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
> 			tests[i].mtu_size,
> diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
> index 7d2abe1..c2a7e1e 100644
> --- a/lib/ip_frag/rte_ip_frag.h
> +++ b/lib/ip_frag/rte_ip_frag.h
> @@ -179,6 +179,33 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
> struct rte_mempool *pool_indirect);
>
> /**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements the fragmentation of IPv4 packets by copy.
> + *
> + * @param pkt_in
> + * The input packet.
> + * @param pkts_out
> + * Array storing the output fragments.
> + * @param mtu_size
> + * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
> + * datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + * MBUF pool used for allocating direct buffers for the output fragments.
> + * @return
> + * Upon successful completion - number of output fragments placed
> + * in the pkts_out array.
> + * Otherwise - (-1) * errno.
> + */
> +__rte_experimental
> +int32_t
> +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
> + struct rte_mbuf **pkts_out,
> + uint16_t nb_pkts_out,
> + uint16_t mtu_size,
> + struct rte_mempool *pool_direct);
> +
> +/**
> * This function implements reassembly of fragmented IPv4 packets.
> * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
> *
> diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
> index 27a8ad2..cb15781 100644
> --- a/lib/ip_frag/rte_ipv4_fragmentation.c
> +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
> @@ -83,6 +83,48 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
> return ipopt_len;
> }
>
> +static struct rte_mbuf *
> +__copy_to_pktmbuf(char *src, struct rte_mbuf *m_head,
> + struct rte_mbuf *m_tail, struct rte_mempool *mp, uint32_t len)
> +{
> + struct rte_mbuf *m_last, **prev;
> +
> + m_last = m_tail;
> + prev = &m_last->next;
> + while (len > 0) {
> + uint32_t copy_len;
> +
> + /* current buffer is full, chain a new one */
> + if (unlikely(rte_pktmbuf_tailroom(m_last) == 0)) {
> + m_last = rte_pktmbuf_alloc(mp);
> + if (unlikely(m_last == NULL))
> + return NULL;
> +
> + ++m_head->nb_segs;
> + *prev = m_last;
> + prev = &m_last->next;
> + }
I think that MBUF_FAST_FREE also requires non-segmented packets, although this requirement is missing in the documentation. I have asked Olivier (as MBUF maintainer) to confirm this requirement [1].
[1] http://inbox.dpdk.org/dev/98CBD80474FA8B44BF855DF32C47DC35D871C5@smartserver.smartshare.dk/
> +
> + /*
> + * copy the min of data in len
> + * vs space available in output (m_last)
> + */
> + copy_len = RTE_MIN(rte_pktmbuf_tailroom(m_last), len);
> +
> + /* append from seg to m_last */
> +		memcpy(rte_pktmbuf_mtod_offset(m_last, char *, m_last->data_len),
> + src, copy_len);
> +
> + /* update offsets and lengths */
> + m_last->data_len += copy_len;
> + m_head->pkt_len += copy_len;
> + src += copy_len;
> + len -= copy_len;
> + }
> +
> + return m_last;
> +}
> +
> /**
> * IPv4 fragmentation.
> *
On Fri, 22 Jul 2022 21:01:50 +0800
Huichao Cai <chcchc88@163.com> wrote:
> Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
> for fast release of mbufs. When set application must guarantee that
> per-queue all mbufs comes from the same mempool and has refcnt = 1)
> offload. In order to adapt to this offload function, add this API.
> Add some test data for this API.
>
> Signed-off-by: Huichao Cai <chcchc88@163.com>
The code should just be checking that refcnt == 1 directly.
There are cases where sender passes a cloned mbuf. This is independent
of the fast free optimization.
Similar to what Linux kernel does with skb_cow().
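For illustration, a minimal sketch of the check being suggested here (a hypothetical helper, not part of the patch): walk the segment chain and report whether any segment is shared or indirect, so a caller can fall back to the copying path only when needed, in the spirit of the kernel's skb_cow().

#include <rte_mbuf.h>

/* Hypothetical helper: returns 1 if the packet cannot be handed to a
 * MBUF_FAST_FREE queue as-is (some segment is shared or indirect). */
static inline int
pkt_needs_copy(const struct rte_mbuf *m)
{
	for (; m != NULL; m = m->next)
		if (rte_mbuf_refcnt_read(m) > 1 || !RTE_MBUF_DIRECT(m))
			return 1;
	return 0;
}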
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Friday, 22 July 2022 16.49
>
> On Fri, 22 Jul 2022 21:01:50 +0800
> Huichao Cai <chcchc88@163.com> wrote:
>
> > [...]
>
> The code should just be checking that refcnt == 1 directly.
>
> There are cases where sender passes a cloned mbuf. This is independent
> of the fast free optimization.
>
> Similar to what Linux kernel does with skb_cow().
Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.
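One hedged sketch of how the API could fail instead of chaining (hypothetical, not what this version of the patch does): check up front that a whole fragment fits in a single direct mbuf from the pool, so the copy loop never has to allocate a second segment.

#include <rte_mbuf.h>

/* Hypothetical guard against producing segmented fragments: mtu_size
 * bytes plus headroom must fit in one mbuf from pool_direct. */
static inline int
frag_fits_one_mbuf(struct rte_mempool *pool_direct, uint16_t mtu_size)
{
	return (uint32_t)mtu_size + RTE_PKTMBUF_HEADROOM <=
		rte_pktmbuf_data_room_size(pool_direct);
}

rte_ipv4_fragment_copy_packet() could then return -ENOTSUP (or whatever errno is agreed on) when this check fails.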
At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
>> [...]
>
>Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
>
>This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.
When the bnxt driver sends an mbuf, it takes the mbuf segments apart and hangs them on the tx_buf_ring, so there are no mbuf segments left when they are released. Does this mean that there can be mbuf segments?
From: Huichao Cai [mailto:chcchc88@163.com]
Sent: Friday, 22 July 2022 17.59
> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
> >> [...]
> >
> >Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
> >
> >This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.
>
> When the bnxt driver sends mbuf, it will take the mbuf segments apart and hang it to the tx_buf_ring, so there is no mbuf segments when it is released. Does this mean that there can be mbuf segments?
Only if the bnxt driver also resets the segmentation fields (nb_segs and next) in those mbufs, which I suppose it does, if it supports MBUF_FAST_FREE with segmented packets.
However, other Ethernet drivers don't do that, so a generic library function cannot rely on it. These missing requirements for MBUF_FAST_FREE are a bug, either in the MBUF_FAST_FREE documentation or in the drivers where MBUF_FAST_FREE only works correctly with direct and non-segmented mbufs.
22/07/2022 17:14, Morten Brørup wrote:
> From: Huichao Cai [mailto:chcchc88@163.com]
> Sent: Friday, 22 July 2022 17.59
>
>> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
>>> [...]
>>
>> When the bnxt driver sends mbuf, it will take the mbuf segments apart and hang it to the tx_buf_ring, so there is no mbuf segments when it is released. Does this mean that there can be mbuf segments?
>
> Only if the bnxt driver also resets the segmentation fields (nb_segs and next) in those mbufs, which I suppose it does, if it supports MBUF_FAST_FREE with segmented packets.
>
> However, other Ethernet drivers don't do that, so a generic library function cannot rely on it. These missing requirements for MBUF_FAST_FREE is a bug, either in the MBUF_FAST_FREE documentation, or in the drivers where MBUF_FAST_FREE only works correctly with direct and non-segmented mbufs.
>
I believe multi-segment packets work ok with MBUF_FAST_FREE
(as long as other requirements are met).
+CC: i40e maintainers
+CC: mlx5 maintainers
> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
> Sent: Saturday, 23 July 2022 00.35
>
> 22/07/2022 17:14, Morten Brørup wrote:
>> [...]
>
> I believe multi-segment packets work ok with MBUF_FAST_FREE
> (as long as other requirements are met).
Looking at the i40e and mlx5 drivers, they both seem to call rte_mempool_put_bulk() without first calling rte_pktmbuf_prefree_seg(). So segmented packets freed with MBUF_FAST_FREE will be stored in the mbuf pool without m->nb_segs and m->next being reset first.
I don't have deep knowledge of these drivers, so maybe I have overlooked something.
The point of MBUF_FAST_FREE is to bypass a lot of code under certain conditions. So I believe that these two undocumented requirements should remain, so the drivers can bypass this code. Otherwise, don't use MBUF_FAST_FREE.
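To make the difference concrete, here is an illustrative sketch of the two TX completion paths being compared (placeholder names, not actual i40e/mlx5 code): the fast path returns mbufs to the pool wholesale, so nothing ever resets m->next or m->nb_segs; the regular path goes through rte_pktmbuf_prefree_seg(), which does.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative only; txq_pool, mbufs, n and fast_free are placeholders. */
static void
tx_completion_sketch(struct rte_mempool *txq_pool, struct rte_mbuf **mbufs,
	uint16_t n, int fast_free)
{
	if (fast_free) {
		/* assume direct, non-segmented, refcnt == 1, single pool:
		 * put straight back without touching next/nb_segs */
		rte_mempool_put_bulk(txq_pool, (void **)mbufs, n);
	} else {
		for (uint16_t i = 0; i < n; i++) {
			/* decrements refcnt; on reaching zero it resets
			 * m->next and m->nb_segs before returning m */
			struct rte_mbuf *m =
				rte_pktmbuf_prefree_seg(mbufs[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}
}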
23/07/2022 09:24, Morten Brørup wrote:
> +CC: i40e maintainers
> +CC: mlx5 maintainers
>
>> [...]
>>
>> I believe multi-segment packets work ok with MBUF_FAST_FREE
>> (as long as other requirements are met).
>
> Looking at the i40e and mlx5 drivers, they both seem to call rte_mempool_put_bulk() without first calling rte_pktmbuf_prefree_seg(). So segmented packets freed with MBUF_FAST_FREE, will be stored in the mbuf pool without m->nb_segs and m->next being reset first.
>
> I don't have deep knowledge of these drivers, so maybe I have overlooked something.
>
> The point of MBUF_FAST_FREE is to bypass a lot of code under certain conditions. So I believe that these two undocumented requirements should remain, so the drivers can bypass this code. Otherwise, don't use MBUF_FAST_FREE.
>
Actually, after another look, I think you and Olivier are right -
multi-seg packets should not be used together with MBUF_FAST_FREE.
I forgot that mbuf_prefree() is responsible to reset both 'next'
and 'nb_segs' fields of the mbuf.
It might keep working for some simple forwarding app (like l3fwd),
as most PMDs reset these fields at RX path anyway, but that's just a
coincidence we shouldn't rely on.
We probably need to update l3fwd (and other examples) to disallow
MBUF_FAST_FREE when TX_OFFLOAD_MULTI_SEGS is selected.
Konstantin
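A sketch of the guard being suggested for the example apps (hypothetical; uses the current RTE_ETH_TX_OFFLOAD_* flag names):

#include <rte_ethdev.h>

/* Hypothetical example-app fixup: multi-segment TX and fast free are
 * mutually exclusive, so drop FAST_FREE when multi-seg is requested. */
static void
fixup_tx_offloads(struct rte_eth_conf *port_conf)
{
	if (port_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		port_conf->txmode.offloads &=
			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}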
> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
> Sent: Saturday, 23 July 2022 20.25
>
> 23/07/2022 09:24, Morten Brørup wrote:
>> [...]
>
> Actually, after another look, I think you and Olivier are right -
> multi-seg packets should not be used together with MBUF_FAST_FREE.
> I forgot that mbuf_prefree() is responsible to reset both 'next'
> and 'nb_segs' fields of the mbuf.
> It might keep working for some simple forwarding app (like l3fwd),
> as most PMDs reset these fields at RX path anyway, but that's just a
> coincidence we shouldn't rely on.
I hope the PMDs don't reset these fields in their RX path, unless they are creating multi-seg packets and therefore must. It might cause an extra cache miss per packet, if the PMD unnecessarily sets m->next, which is in the second cache line of the mbuf.
Or perhaps everyone has forgotten about this RX/TX split of the first/second cache line of the mbufs, because all tests are based on run-to-completion, where the second cache line will be written shortly afterwards anyway. :-(
> We probably need to update l3fwd (and other examples) to dis-allow
> MBUF_FAST_FREE when TX_OFFLOAD_MULTI_SEGS is selected.
+1
>
> Konstantin
@@ -418,10 +418,16 @@ static void ut_teardown(void)
 	}
 
 	if (tests[i].ipv == 4)
-		len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+		if (i % 2)
+			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 				tests[i].mtu_size,
 				direct_pool,
 				indirect_pool);
+		else
+			len = rte_ipv4_fragment_copy_packet(b, pkts_out,
+				BURST,
+				tests[i].mtu_size,
+				pkt_pool);
 	else if (tests[i].ipv == 6)
 		len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 			tests[i].mtu_size,
@@ -179,6 +179,33 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
struct rte_mempool *pool_indirect);
/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ * datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
* This function implements reassembly of fragmented IPv4 packets.
* Incoming mbufs should have its l2_len/l3_len fields setup correctly.
*
@@ -83,6 +83,48 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
return ipopt_len;
}
+static struct rte_mbuf *
+__copy_to_pktmbuf(char *src, struct rte_mbuf *m_head,
+	struct rte_mbuf *m_tail, struct rte_mempool *mp, uint32_t len)
+{
+	struct rte_mbuf *m_last, **prev;
+
+	m_last = m_tail;
+	prev = &m_last->next;
+	while (len > 0) {
+		uint32_t copy_len;
+
+		/* current buffer is full, chain a new one */
+		if (unlikely(rte_pktmbuf_tailroom(m_last) == 0)) {
+			m_last = rte_pktmbuf_alloc(mp);
+			if (unlikely(m_last == NULL))
+				return NULL;
+
+			++m_head->nb_segs;
+			*prev = m_last;
+			prev = &m_last->next;
+		}
+
+		/*
+		 * copy the min of data in len
+		 * vs space available in output (m_last)
+		 */
+		copy_len = RTE_MIN(rte_pktmbuf_tailroom(m_last), len);
+
+		/* append from seg to m_last */
+		memcpy(rte_pktmbuf_mtod_offset(m_last, char *, m_last->data_len),
+			src, copy_len);
+
+		/* update offsets and lengths */
+		m_last->data_len += copy_len;
+		m_head->pkt_len += copy_len;
+		src += copy_len;
+		len -= copy_len;
+	}
+
+	return m_last;
+}
+
/**
* IPv4 fragmentation.
*
@@ -259,3 +301,169 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
return out_pkt_pos;
}
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ * The input packet.
+ * @param pkts_out
+ * Array storing the output fragments.
+ * @param mtu_size
+ * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ * datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ * MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ * Upon successful completion - number of output fragments placed
+ * in the pkts_out array.
+ * Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) ||
+	    unlikely(pool_direct == NULL) ||
+	    unlikely(pool_direct != pkt_in->pool) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+		RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+		IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL, *tail_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+		tail_pkt = out_pkt;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			tail_pkt = __copy_to_pktmbuf(rte_pktmbuf_mtod_offset(
+					in_seg, char *, in_seg_data_pos),
+				out_pkt, tail_pkt, pool_direct, len);
+			if (unlikely(!tail_pkt)) {
+				rte_pktmbuf_free(out_pkt);
+				__free_fragments(pkts_out, out_pkt_pos);
+				return -ENOMEM;
+			}
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+			(uint16_t)out_pkt->pkt_len,
+			flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+		    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
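As a worked example of the frag_size and capacity checks above: with mtu_size = 1500 and a 20-byte header (no options), frag_size = RTE_ALIGN_FLOOR(1500 - 20, 8) = 1480. A pkt_in with pkt_len = 4020 carries 4000 payload bytes, which splits into fragments of 1480 + 1480 + 1040 bytes, so nb_pkts_out must be at least 3: with nb_pkts_out = 2 the check 1480 * 2 = 2960 < 4000 returns -EINVAL, while 1480 * 3 = 4440 >= 4000 passes.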
@@ -17,4 +17,5 @@ EXPERIMENTAL {
global:
rte_ip_frag_table_del_expired_entries;
+ rte_ipv4_fragment_copy_packet;
};