netstack3_ip/
reassembly.rs

1// Copyright 2019 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Module for IP fragmented packet reassembly support.
6//!
7//! `reassembly` is a utility to support reassembly of fragmented IP packets.
8//! Fragmented packets are associated by a combination of the packets' source
9//! address, destination address and identification value. When a potentially
10//! fragmented packet is received, this utility will check to see if the packet
11//! is in fact fragmented or not. If it isn't fragmented, it will be returned as
12//! is without any modification. If it is fragmented, this utility will capture
13//! its body and store it in a cache while waiting for all the fragments for a
14//! packet to arrive. The header information from a fragment with offset set to
15//! 0 will also be kept to add to the final, reassembled packet. Once this
16//! utility has received all the fragments for a combination of source address,
17//! destination address and identification value, the implementer will need to
18//! allocate a buffer of sufficient size to reassemble the final packet into and
19//! pass it to this utility. This utility will then attempt to reassemble and
20//! parse the packet, which will be returned to the caller. The caller should
21//! then handle the returned packet as a normal IP packet. Note, there is a
22//! timer from receipt of the first fragment to reassembly of the final packet.
//! See [`ReassemblyIpExt::REASSEMBLY_TIMEOUT`].
24//!
25//! Note, this utility does not support reassembly of jumbogram packets.
26//! According to the IPv6 Jumbogram RFC (RFC 2675), the jumbogram payload option
27//! is relevant only for nodes that may be attached to links with a link MTU
28//! greater than 65575 bytes. Note, the maximum size of a non-jumbogram IPv6
29//! packet is also 65575 (as the payload length field for IP packets is 16 bits
30//! + 40 byte IPv6 header). If a link supports an MTU greater than the maximum
31//! size of a non-jumbogram packet, the packet should not be fragmented.
32
33use alloc::collections::hash_map::{Entry, HashMap};
34use alloc::collections::{BTreeSet, BinaryHeap};
35use alloc::vec::Vec;
36use core::cmp::Ordering;
37use core::fmt::Debug;
38use core::hash::Hash;
39use core::time::Duration;
40
41use assert_matches::assert_matches;
42use log::debug;
43use net_types::ip::{GenericOverIp, Ip, IpAddr, IpVersionMarker, Ipv4, Ipv6};
44use netstack3_base::{
45    CoreTimerContext, HandleableTimer, InstantBindingsTypes, IpExt, LocalTimerHeap,
46    TimerBindingsTypes, TimerContext,
47};
48use packet::BufferViewMut;
49use packet_formats::ip::{IpPacket, Ipv4Proto};
50use packet_formats::ipv4::{Ipv4Header, Ipv4Packet};
51use packet_formats::ipv6::ext_hdrs::Ipv6ExtensionHeaderData;
52use packet_formats::ipv6::Ipv6Packet;
53use zerocopy::{SplitByteSlice, SplitByteSliceMut};
54
/// An IP extension trait supporting reassembly of fragments.
pub trait ReassemblyIpExt: IpExt {
    /// The maximum amount of time from receipt of the first fragment to
    /// reassembly of a packet. Note, "first fragment" does not mean a fragment
    /// with offset 0; it means the first fragment packet we receive with a new
    /// combination of source address, destination address and fragment
    /// identification value.
    const REASSEMBLY_TIMEOUT: Duration;

    /// An IP specific field that should be considered part of the
    /// [`FragmentCacheKey`].
    ///
    /// E.g. IPv4 keys reassembly on the inner protocol while IPv6 does not;
    /// see the per-version impls of this trait.
    type FragmentCacheKeyPart: Copy + Clone + Debug + Hash + PartialEq + Eq;

    /// Returns the IP specific portion of the [`FragmentCacheKey`] from the
    /// packet.
    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart;
}
74
impl ReassemblyIpExt for Ipv4 {
    /// This value is specified in RFC 791, section 3.2:
    ///   The current recommendation for the initial timer setting is 15
    ///   seconds.
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(15);

    /// IPv4 considers the inner protocol to be part of the fragmentation key.
    /// From RFC 791, section 2.3:
    ///   To assemble the fragments of an internet datagram, an internet
    ///   protocol module (for example at a destination host) combines
    ///   internet datagrams that all have the same value for the four fields:
    ///   identification, source, destination, and protocol.
    type FragmentCacheKeyPart = Ipv4Proto;

    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        // Fully-qualified call to disambiguate from any inherent `proto`.
        IpPacket::proto(packet)
    }
}
95
96impl ReassemblyIpExt for Ipv6 {
97    /// This value is specified in RFC 8200, section 4.5:
98    ///   If insufficient fragments are received to complete reassembly
99    ///   of a packet within 60 seconds of the reception of the first-
100    ///   arriving fragment of that packet, reassembly of that packet
101    ///   must be abandoned and all the fragments that have been received
102    ///   for that packet must be discarded.
103    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(60);
104
105    /// Unlike IPv4, IPv6 allows reassembling fragments that have different
106    /// inner protocols. From RFC 8200, section 4.5:
107    ///   The Next Header values in the Fragment headers of different
108    ///   fragments of the same original packet may differ.  Only the value
109    ///   from the Offset zero fragment packet is used for reassembly.
110    type FragmentCacheKeyPart = ();
111
112    fn ip_specific_key_part<B: SplitByteSlice>(
113        _packet: &Self::Packet<B>,
114    ) -> Self::FragmentCacheKeyPart {
115        ()
116    }
117}
118
/// Number of bytes per fragment block for IPv4 and IPv6.
///
/// IPv4 outlines the fragment block size in RFC 791 section 3.1, under the
/// fragment offset field's description: "The fragment offset is measured in
/// units of 8 octets (64 bits)".
///
/// IPv6 outlines the fragment block size in RFC 8200 section 4.5, under the
/// fragment offset field's description: "The offset, in 8-octet units, of the
/// data following this header".
const FRAGMENT_BLOCK_SIZE: u8 = 8;

/// Maximum number of fragment blocks an IPv4 or IPv6 packet can have.
///
/// We use this value because both IPv4 fixed header's fragment offset field and
/// IPv6 fragment extension header's fragment offset field are 13 bits wide,
/// giving a maximum offset of 2^13 - 1 = 8191.
const MAX_FRAGMENT_BLOCKS: u16 = 8191;

/// Maximum number of bytes of all currently cached fragments per IP protocol.
///
/// If the current cache size is less than this number, a new fragment can be
/// cached (even if this will result in the total cache size exceeding this
/// threshold). If the current cache size >= this number, the incoming fragment
/// will be dropped.
const MAX_FRAGMENT_CACHE_SIZE: usize = 4 * 1024 * 1024;
143
/// The state context for the fragment cache.
pub trait FragmentContext<I: Ip, BT: FragmentBindingsTypes> {
    /// Calls `cb` with exclusive access to the fragment cache and returns
    /// the callback's result.
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O;
}
149
/// The bindings types for IP packet fragment reassembly.
pub trait FragmentBindingsTypes: TimerBindingsTypes + InstantBindingsTypes {}
// Blanket impl: any bindings providing timers and instants qualify.
impl<BT> FragmentBindingsTypes for BT where BT: TimerBindingsTypes + InstantBindingsTypes {}

/// The bindings execution context for IP packet fragment reassembly.
pub trait FragmentBindingsContext: TimerContext + FragmentBindingsTypes {}
// Blanket impl: any timer-capable bindings context qualifies.
impl<BC> FragmentBindingsContext for BC where BC: TimerContext + FragmentBindingsTypes {}
157
/// The timer ID for the fragment cache.
///
/// Carries no state beyond the IP version marker: there is a single timer ID
/// per IP version, and per-key expiry is tracked inside the cache's
/// `LocalTimerHeap`.
#[derive(Hash, Eq, PartialEq, Default, Clone, Debug, GenericOverIp)]
#[generic_over_ip(I, Ip)]
pub struct FragmentTimerId<I: Ip>(IpVersionMarker<I>);
162
/// An implementation of a fragment cache.
pub trait FragmentHandler<I: ReassemblyIpExt, BC> {
    /// Attempts to process a packet fragment.
    ///
    /// Returns a [`FragmentProcessingState`] describing whether the packet
    /// was unfragmented, invalid, cached pending more fragments, rejected
    /// for lack of cache space, or ready for reassembly.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket;

    /// Attempts to reassemble a packet.
    ///
    /// Attempts to reassemble a packet associated with a given
    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
    /// data. The caller is expected to allocate a buffer of sufficient size
    /// (available from `process_fragment` when it returns a
    /// `FragmentProcessingState::Ready` value) and provide it to
    /// `reassemble_packet` as `buffer` where the packet will be reassembled
    /// into.
    ///
    /// # Panics
    ///
    /// Panics if the provided `buffer` does not have enough capacity for the
    /// reassembled packet. Also panics if a different `ctx` is passed to
    /// `reassemble_packet` from the one passed to `process_fragment` when
    /// processing a packet with a given `key` as `reassemble_packet` will fail
    /// to cancel the reassembly timer.
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError>;
}
202
203impl<I: IpExt + ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
204    FragmentHandler<I, BC> for CC
205{
206    fn process_fragment<B: SplitByteSlice>(
207        &mut self,
208        bindings_ctx: &mut BC,
209        packet: I::Packet<B>,
210    ) -> FragmentProcessingState<I, B>
211    where
212        I::Packet<B>: FragmentablePacket,
213    {
214        self.with_state_mut(|cache| {
215            let (res, timer_action) = cache.process_fragment(packet);
216
217            if let Some(timer_action) = timer_action {
218                match timer_action {
219                    // TODO(https://fxbug.dev/414413500): for IPv4, use the
220                    // fragment's TTL to determine the timeout.
221                    CacheTimerAction::CreateNewTimer(key) => {
222                        assert_eq!(
223                            cache.timers.schedule_after(
224                                bindings_ctx,
225                                key,
226                                (),
227                                I::REASSEMBLY_TIMEOUT,
228                            ),
229                            None
230                        )
231                    }
232                    CacheTimerAction::CancelExistingTimer(key) => {
233                        assert_ne!(cache.timers.cancel(bindings_ctx, &key), None)
234                    }
235                }
236            }
237
238            res
239        })
240    }
241
242    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
243        &mut self,
244        bindings_ctx: &mut BC,
245        key: &FragmentCacheKey<I>,
246        buffer: BV,
247    ) -> Result<(), FragmentReassemblyError> {
248        self.with_state_mut(|cache| {
249            let res = cache.reassemble_packet(key, buffer);
250
251            match res {
252                Ok(_) | Err(FragmentReassemblyError::PacketParsingError) => {
253                    // Cancel the reassembly timer as we attempt reassembly which
254                    // means we had all the fragments for the final packet, even
255                    // if parsing the reassembled packet failed.
256                    assert_matches!(cache.timers.cancel(bindings_ctx, key), Some(_));
257                }
258                Err(FragmentReassemblyError::InvalidKey)
259                | Err(FragmentReassemblyError::MissingFragments) => {}
260            }
261
262            res
263        })
264    }
265}
266
impl<I: ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
    HandleableTimer<CC, BC> for FragmentTimerId<I>
{
    /// Handles expiry of a reassembly timer: discards all cached reassembly
    /// state for the key whose timer fired.
    fn handle(self, core_ctx: &mut CC, bindings_ctx: &mut BC, _: BC::UniqueTimerId) {
        // Destructure to prove the timer id carries nothing beyond the IP
        // version marker.
        let Self(IpVersionMarker { .. }) = self;
        core_ctx.with_state_mut(|cache| {
            // Pop the expired entry from the per-key timer heap; nothing to
            // do if no per-key timer has actually expired.
            let Some((key, ())) = cache.timers.pop(bindings_ctx) else {
                return;
            };

            // If a timer fired, the `key` must still exist in our fragment cache.
            let FragmentCacheData { missing_blocks: _, body_fragments, header: _, total_size } =
                assert_matches!(cache.remove_data(&key), Some(c) => c);
            debug!(
                "reassembly for {key:?} \
                timed out with {} fragments and {total_size} bytes",
                body_fragments.len(),
            );
        });
    }
}
288
/// Trait that must be implemented by any packet type that is fragmentable.
pub trait FragmentablePacket {
    /// Return fragment identifier data.
    ///
    /// Returns the fragment identification, offset and more flag as `(a, b, c)`
    /// where `a` is the fragment identification value, `b` is the fragment
    /// offset and `c` is the more flag.
    ///
    /// # Panics
    ///
    /// Panics if the packet has no fragment data.
    fn fragment_data(&self) -> (u32, u16, bool);
}
302
303impl<B: SplitByteSlice> FragmentablePacket for Ipv4Packet<B> {
304    fn fragment_data(&self) -> (u32, u16, bool) {
305        (u32::from(self.id()), self.fragment_offset().into_raw(), self.mf_flag())
306    }
307}
308
309impl<B: SplitByteSlice> FragmentablePacket for Ipv6Packet<B> {
310    fn fragment_data(&self) -> (u32, u16, bool) {
311        for ext_hdr in self.iter_extension_hdrs() {
312            if let Ipv6ExtensionHeaderData::Fragment { fragment_data } = ext_hdr.data() {
313                return (
314                    fragment_data.identification(),
315                    fragment_data.fragment_offset().into_raw(),
316                    fragment_data.m_flag(),
317                );
318            }
319        }
320
321        unreachable!(
322            "Should never call this function if the packet does not have a fragment header"
323        );
324    }
325}
326
/// Possible return values for [`IpPacketFragmentCache::process_fragment`].
#[derive(Debug)]
pub enum FragmentProcessingState<I: ReassemblyIpExt, B: SplitByteSlice> {
    /// The provided packet is not fragmented so no processing is required.
    /// The packet is returned with this value without any modification.
    NotNeeded(I::Packet<B>),

    /// The provided packet is fragmented but it is malformed.
    ///
    /// Possible reasons for being malformed are:
    ///  1) Body is not a multiple of `FRAGMENT_BLOCK_SIZE` and it is not the
    ///     last fragment (last fragment of a packet, not last fragment received
    ///     for a packet).
    ///  2) Overlaps with an existing fragment. This is explicitly not allowed
    ///     for IPv6 as per RFC 8200 section 4.5 (more details in RFC 5722). We
    ///     choose the same behaviour for IPv4 for the same reasons.
    ///  3) Packet's fragment offset + # of fragment blocks >
    ///     `MAX_FRAGMENT_BLOCKS`.
    InvalidFragment,

    /// Successfully processed the provided fragment. We are still waiting on
    /// more fragments for a packet to arrive before being ready to reassemble
    /// the packet.
    NeedMoreFragments,

    /// Cannot process the fragment because `MAX_FRAGMENT_CACHE_SIZE` is
    /// reached.
    OutOfMemory,

    /// Successfully processed the provided fragment. We now have all the
    /// fragments we need to reassemble the packet. The caller must create a
    /// buffer with capacity for at least `packet_len` bytes and provide the
    /// buffer and `key` to `reassemble_packet`.
    Ready { key: FragmentCacheKey<I>, packet_len: usize },
}
362
/// Possible errors when attempting to reassemble a packet.
#[derive(Debug, PartialEq, Eq)]
pub enum FragmentReassemblyError {
    /// At least one fragment for a packet has not arrived.
    MissingFragments,

    /// A `FragmentCacheKey` is not associated with any packet. This could be
    /// because either no fragment has yet arrived for a packet associated with
    /// a `FragmentCacheKey` or some fragments did arrive, but the reassembly
    /// timer expired and the state got discarded.
    InvalidKey,

    /// Packet parsing error.
    PacketParsingError,
}
378
/// Fragment Cache Key.
///
/// Composed of the original packet's source address, destination address,
/// fragment id, and an IP-version-specific component (see
/// [`ReassemblyIpExt::FragmentCacheKeyPart`]; the inner protocol for IPv4,
/// nothing for IPv6).
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct FragmentCacheKey<I: ReassemblyIpExt> {
    src_ip: I::Addr,
    dst_ip: I::Addr,
    fragment_id: u32,
    ip_specific_fields: I::FragmentCacheKeyPart,
}
390
/// An inclusive-inclusive range of bytes within a reassembled packet.
// NOTE: We use this instead of `std::ops::RangeInclusive` because the latter
// provides getter methods which return references, and it adds a lot of
// unnecessary dereferences.
//
// The derived `Ord` compares lexicographically by `start`, then `end`, which
// orders ranges by their position within the packet.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
struct BlockRange {
    // First fragment block covered by this range (inclusive).
    start: u16,
    // Last fragment block covered by this range (inclusive).
    end: u16,
}
400
/// Data required for fragmented packet reassembly.
#[derive(Debug)]
struct FragmentCacheData {
    /// List of non-overlapping inclusive ranges of fragment blocks required
    /// before being ready to reassemble a packet.
    ///
    /// When creating a new instance of `FragmentCacheData`, we will set
    /// `missing_blocks` to a list with a single element representing all
    /// blocks, (0, MAX_VALUE). In this case, MAX_VALUE will be set to
    /// `u16::MAX`.
    missing_blocks: BTreeSet<BlockRange>,

    /// Received fragment blocks.
    ///
    /// We use a binary heap for help when reassembling packets. When we
    /// reassemble packets, we will want to fill up a new buffer with all the
    /// body fragments. The easiest way to do this is in order, from the
    /// fragment with offset 0 to the fragment with the highest offset. Since we
    /// only need to enforce the order when reassembling, we use a min-heap so
    /// we have a defined order (increasing fragment offset values) when
    /// popping. `BinaryHeap` is technically a max-heap, but we use the negative
    /// of the offset values as the key for the heap. See
    /// [`PacketBodyFragment::new`].
    body_fragments: BinaryHeap<PacketBodyFragment>,

    /// The header data for the reassembled packet.
    ///
    /// The header of the fragment packet with offset 0 will be used as the
    /// header for the final, reassembled packet.
    header: Option<Vec<u8>>,

    /// Total number of bytes in the reassembled packet.
    ///
    /// This is used so that we don't have to iterate through `body_fragments`
    /// and sum the partial body sizes to calculate the reassembled packet's
    /// size.
    total_size: usize,
}
439
440impl Default for FragmentCacheData {
441    fn default() -> FragmentCacheData {
442        FragmentCacheData {
443            missing_blocks: core::iter::once(BlockRange { start: 0, end: u16::MAX }).collect(),
444            body_fragments: BinaryHeap::new(),
445            header: None,
446            total_size: 0,
447        }
448    }
449}
450
impl FragmentCacheData {
    /// Attempts to find a gap where the provided `BlockRange` will fit in.
    ///
    /// Returns `Found` when the range fits entirely inside a single missing
    /// gap, `Overlap` when it straddles received data and a gap, and
    /// otherwise classifies the range as `OutOfBounds` or `Duplicate`.
    fn find_gap(&self, BlockRange { start, end }: BlockRange) -> FindGapResult {
        let result = self.missing_blocks.iter().find_map(|gap| {
            // This gap completely contains the provided range.
            if gap.start <= start && gap.end >= end {
                return Some(FindGapResult::Found { gap: *gap });
            }

            // This gap is completely disjoint from the provided range.
            // Ignore it.
            if gap.start > end || gap.end < start {
                return None;
            }

            // If neither of the above are true, this gap must overlap with
            // the provided range.
            return Some(FindGapResult::Overlap);
        });

        match result {
            Some(result) => result,
            None => {
                // Searching the missing blocks didn't find a suitable gap nor
                // an overlap. Check for an out-of-bounds range before
                // concluding that this range must be a duplicate.

                // Note: `last` *must* exist and *must* represent the final
                // fragment. If we had not yet received the final fragment, the
                // search through the `missing_blocks` would be guaranteed to
                // return `Some` (because it would contain a range with an end
                // equal to u16::Max).
                //
                // NOTE(review): `peek()` returning the *final* fragment relies
                // on `PacketBodyFragment`'s `Ord` making the heap a max-heap
                // on offset; the `body_fragments` doc above instead describes
                // a min-heap via negated offsets — confirm against
                // `PacketBodyFragment`'s `Ord` impl.
                let last = self.body_fragments.peek().unwrap();
                if last.offset < start {
                    FindGapResult::OutOfBounds
                } else {
                    FindGapResult::Duplicate
                }
            }
        }
    }
}
493
/// The result of calling [`FragmentCacheData::find_gap`].
enum FindGapResult {
    /// The provided `BlockRange` fits inside of an existing gap. The gap may be
    /// completely or partially filled by the provided `BlockRange`.
    Found {
        gap: BlockRange,
    },
    /// The provided `BlockRange` overlaps with data we've already received.
    /// Specifically, an overlap occurs if the provided `BlockRange` is partially
    /// contained within a gap.
    Overlap,
    /// The provided `BlockRange` has an end beyond the known end of the packet.
    OutOfBounds,
    /// The provided `BlockRange` has already been received. Specifically, a
    /// duplicate occurs if the provided `BlockRange` is completely disjoint from
    /// all known gaps.
    ///
    /// RFC 8200, Section 4.5 states:
    ///   It should be noted that fragments may be duplicated in the
    ///   network.  Instead of treating these exact duplicate fragments
    ///   as overlapping fragments, an implementation may choose to
    ///   detect this case and drop exact duplicate fragments while
    ///   keeping the other fragments belonging to the same packet.
    ///
    /// Here we take a loose interpretation of "exact" and choose not to verify
    /// that the *data* contained within the fragment matches the previously
    /// received data. This is in the spirit of reducing the work performed by
    /// the assembler, and is in line with the behavior of other platforms.
    Duplicate,
}
524
/// A cache of inbound IP packet fragments.
#[derive(Debug)]
pub struct IpPacketFragmentCache<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
    /// In-progress reassembly state, keyed by (source, destination,
    /// fragment id, IP-specific part).
    cache: HashMap<FragmentCacheKey<I>, FragmentCacheData>,
    /// Current cache size in bytes; compared against `threshold` before
    /// accepting new fragments.
    size: usize,
    /// Cache-size limit (initialized to `MAX_FRAGMENT_CACHE_SIZE`).
    threshold: usize,
    /// Per-key reassembly timeout timers.
    timers: LocalTimerHeap<FragmentCacheKey<I>, (), BT>,
}
533
534impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> IpPacketFragmentCache<I, BC> {
535    /// Creates a new `IpFragmentCache`.
536    pub fn new<CC: CoreTimerContext<FragmentTimerId<I>, BC>>(
537        bindings_ctx: &mut BC,
538    ) -> IpPacketFragmentCache<I, BC> {
539        IpPacketFragmentCache {
540            cache: HashMap::new(),
541            size: 0,
542            threshold: MAX_FRAGMENT_CACHE_SIZE,
543            timers: LocalTimerHeap::new(bindings_ctx, CC::convert_timer(Default::default())),
544        }
545    }
546}
547
/// A timer operation requested by the cache, to be carried out by the caller
/// (which, unlike the cache itself, has access to the bindings context).
enum CacheTimerAction<I: ReassemblyIpExt> {
    /// Schedule a new reassembly-timeout timer for this key.
    CreateNewTimer(FragmentCacheKey<I>),
    /// Cancel the previously-scheduled timer for this key.
    CancelExistingTimer(FragmentCacheKey<I>),
}
552
553impl<I: ReassemblyIpExt, BT: FragmentBindingsTypes> IpPacketFragmentCache<I, BT> {
554    /// Attempts to process a packet fragment.
555    ///
556    /// # Panics
557    ///
558    /// Panics if the packet has no fragment data.
559    fn process_fragment<B: SplitByteSlice>(
560        &mut self,
561        packet: I::Packet<B>,
562    ) -> (FragmentProcessingState<I, B>, Option<CacheTimerAction<I>>)
563    where
564        I::Packet<B>: FragmentablePacket,
565    {
566        if self.above_size_threshold() {
567            return (FragmentProcessingState::OutOfMemory, None);
568        }
569
570        // Get the fragment data.
571        let (id, offset, m_flag) = packet.fragment_data();
572
573        // Check if `packet` is actually fragmented. We know it is not
574        // fragmented if the fragment offset is 0 (contains first fragment) and
575        // we have no more fragments. This means the first fragment is the only
576        // fragment, implying we have a full packet.
577        if offset == 0 && !m_flag {
578            return (FragmentProcessingState::NotNeeded(packet), None);
579        }
580
581        // Make sure packet's body isn't empty. Since at this point we know that
582        // the packet is definitely fragmented (`offset` is not 0 or `m_flag` is
583        // `true`), we simply let the caller know we need more fragments. This
584        // should never happen, but just in case :).
585        if packet.body().is_empty() {
586            return (FragmentProcessingState::NeedMoreFragments, None);
587        }
588
589        // Make sure body is a multiple of `FRAGMENT_BLOCK_SIZE` bytes, or
590        // `packet` contains the last fragment block which is allowed to be less
591        // than `FRAGMENT_BLOCK_SIZE` bytes.
592        if m_flag && (packet.body().len() % (FRAGMENT_BLOCK_SIZE as usize) != 0) {
593            return (FragmentProcessingState::InvalidFragment, None);
594        }
595
596        // Key used to find this connection's fragment cache data.
597        let key = FragmentCacheKey {
598            src_ip: packet.src_ip(),
599            dst_ip: packet.dst_ip(),
600            fragment_id: id,
601            ip_specific_fields: I::ip_specific_key_part(&packet),
602        };
603
604        // The number of fragment blocks `packet` contains.
605        //
606        // Note, we are calculating the ceiling of an integer division.
607        // Essentially:
608        //     ceil(packet.body.len() / FRAGMENT_BLOCK_SIZE)
609        //
610        // We need to calculate the ceiling of the division because the final
611        // fragment block for a reassembled packet is allowed to contain less
612        // than `FRAGMENT_BLOCK_SIZE` bytes.
613        //
614        // We know `packet.body().len() - 1` will never be less than 0 because
615        // we already made sure that `packet`'s body is not empty, and it is
616        // impossible to have a negative body size.
617        let num_fragment_blocks = 1 + ((packet.body().len() - 1) / (FRAGMENT_BLOCK_SIZE as usize));
618        assert!(num_fragment_blocks > 0);
619
620        // The range of fragment blocks `packet` contains.
621        //
622        // The maximum number of fragment blocks a reassembled packet is allowed
623        // to contain is `MAX_FRAGMENT_BLOCKS` so we make sure that the fragment
624        // we received does not violate this.
625        let fragment_blocks_range =
626            if let Ok(offset_end) = u16::try_from((offset as usize) + num_fragment_blocks - 1) {
627                if offset_end <= MAX_FRAGMENT_BLOCKS {
628                    BlockRange { start: offset, end: offset_end }
629                } else {
630                    return (FragmentProcessingState::InvalidFragment, None);
631                }
632            } else {
633                return (FragmentProcessingState::InvalidFragment, None);
634            };
635
636        // Get (or create) the fragment cache data.
637        let (fragment_data, timer_not_yet_scheduled) = self.get_or_create(key);
638
639        // Find the gap where `packet` belongs.
640        let found_gap = match fragment_data.find_gap(fragment_blocks_range) {
641            FindGapResult::Overlap | FindGapResult::OutOfBounds => {
642                // Drop all reassembly data as per RFC 8200 section 4.5 (IPv6).
643                // See RFC 5722 for more information.
644                //
645                // IPv4 (RFC 791) does not specify what to do for overlapped
646                // fragments. RFC 1858 section 4.2 outlines a way to prevent an
647                // overlapping fragment attack for IPv4, but this is primarily
648                // for IP filtering since "no standard requires that an
649                // overlap-safe reassemble algorithm be used" on hosts. In
650                // practice, non-malicious nodes should not intentionally send
651                // data for the same fragment block multiple times, so we will
652                // do the same thing as IPv6 in this case.
653                assert_matches!(self.remove_data(&key), Some(_));
654
655                return (
656                    FragmentProcessingState::InvalidFragment,
657                    (!timer_not_yet_scheduled)
658                        .then_some(CacheTimerAction::CancelExistingTimer(key)),
659                );
660            }
661            FindGapResult::Duplicate => {
662                // Ignore duplicate fragments as per RFC 8200 section 4.5
663                // (IPv6):
664                //   It should be noted that fragments may be duplicated in the
665                //   network.  Instead of treating these exact duplicate fragments
666                //   as overlapping fragments, an implementation may choose to
667                //   detect this case and drop exact duplicate fragments while
668                //   keeping the other fragments belonging to the same packet.
669                //
670                // Ipv4 (RFC 791) does not specify what to do for duplicate
671                // fragments. As such we choose to do the same as IPv6 in this
672                // case.
673                return (FragmentProcessingState::NeedMoreFragments, None);
674            }
675            FindGapResult::Found { gap } => gap,
676        };
677
678        let timer_id = timer_not_yet_scheduled.then_some(CacheTimerAction::CreateNewTimer(key));
679
680        // Remove `found_gap` since the gap as it exists will no longer be
681        // valid.
682        assert!(fragment_data.missing_blocks.remove(&found_gap));
683
684        // If the received fragment blocks start after the beginning of
685        // `found_gap`, create a new gap between the beginning of `found_gap`
686        // and the first fragment block contained in `packet`.
687        //
688        // Example:
689        //   `packet` w/ fragments [4, 7]
690        //                 |-----|-----|-----|-----|
691        //                    4     5     6     7
692        //
693        //   `found_gap` w/ fragments [X, 7] where 0 <= X < 4
694        //     |-----| ... |-----|-----|-----|-----|
695        //        X    ...    4     5     6     7
696        //
697        //   Here we can see that with a `found_gap` of [2, 7], `packet` covers
698        //   [4, 7] but we are still missing [X, 3] so we create a new gap of
699        //   [X, 3].
700        if found_gap.start < fragment_blocks_range.start {
701            assert!(fragment_data.missing_blocks.insert(BlockRange {
702                start: found_gap.start,
703                end: fragment_blocks_range.start - 1
704            }));
705        }
706
707        // If the received fragment blocks end before the end of `found_gap` and
708        // we expect more fragments, create a new gap between the last fragment
709        // block contained in `packet` and the end of `found_gap`.
710        //
711        // Example 1:
712        //   `packet` w/ fragments [4, 7] & m_flag = true
713        //     |-----|-----|-----|-----|
714        //        4     5     6     7
715        //
716        //   `found_gap` w/ fragments [4, Y] where 7 < Y <= `MAX_FRAGMENT_BLOCKS`.
717        //     |-----|-----|-----|-----| ... |-----|
718        //        4     5     6     7    ...    Y
719        //
720        //   Here we can see that with a `found_gap` of [4, Y], `packet` covers
721        //   [4, 7] but we still expect more fragment blocks after the blocks in
722        //   `packet` (as noted by `m_flag`) so we are still missing [8, Y] so
723        //   we create a new gap of [8, Y].
724        //
725        // Example 2:
726        //   `packet` w/ fragments [4, 7] & m_flag = false
727        //     |-----|-----|-----|-----|
728        //        4     5     6     7
729        //
730        //   `found_gap` w/ fragments [4, Y] where MAX = `MAX_FRAGMENT_BLOCKS`.
731        //     |-----|-----|-----|-----| ... |-----|
732        //        4     5     6     7    ...   MAX
733        //
734        //   Here we can see that with a `found_gap` of [4, MAX], `packet`
735        //   covers [4, 7] and we don't expect more fragment blocks after the
736        //   blocks in `packet` (as noted by `m_flag`) so we don't create a new
737        //   gap. Note, if we encounter a `packet` where `m_flag` is false,
738        //   `found_gap`'s end value must be MAX because we should only ever not
739        //   create a new gap where the end is MAX when we are processing a
740        //   packet with the last fragment block.
741        if found_gap.end > fragment_blocks_range.end && m_flag {
742            assert!(fragment_data
743                .missing_blocks
744                .insert(BlockRange { start: fragment_blocks_range.end + 1, end: found_gap.end }));
745        } else if found_gap.end > fragment_blocks_range.end && !m_flag && found_gap.end < u16::MAX {
746            // There is another fragment after this one that is already present
747            // in the cache. That means that this fragment can't be the last
748            // one (must have `m_flag` set).
749            return (FragmentProcessingState::InvalidFragment, timer_id);
750        } else {
            // Make sure that if we are not adding a fragment after the packet,
            // it is because `packet` goes up to the `found_gap`'s end boundary,
            // or this is the last fragment. If it is the last fragment for a
            // packet, we make sure that `found_gap`'s end value is
            // `u16::MAX`.
756            assert!(
757                found_gap.end == fragment_blocks_range.end
758                    || (!m_flag && found_gap.end == u16::MAX),
759                "found_gap: {:?}, fragment_blocks_range: {:?} offset: {:?}, m_flag: {:?}",
760                found_gap,
761                fragment_blocks_range,
762                offset,
763                m_flag
764            );
765        }
766
767        let mut added_bytes = 0;
768        // Get header buffer from `packet` if its fragment offset equals to 0.
769        if offset == 0 {
770            assert_eq!(fragment_data.header, None);
771            let header = get_header::<B, I>(&packet);
772            added_bytes = header.len();
773            fragment_data.header = Some(header);
774        }
775
776        // Add our `packet`'s body to the store of body fragments.
777        let mut body = Vec::with_capacity(packet.body().len());
778        body.extend_from_slice(packet.body());
779        added_bytes += body.len();
780        fragment_data.total_size += added_bytes;
781        fragment_data.body_fragments.push(PacketBodyFragment::new(offset, body));
782
783        // If we still have missing fragments, let the caller know that we are
784        // still waiting on some fragments. Otherwise, we let them know we are
785        // ready to reassemble and give them a key and the final packet length
786        // so they can allocate a sufficient buffer and call
787        // `reassemble_packet`.
788        let result = if fragment_data.missing_blocks.is_empty() {
789            FragmentProcessingState::Ready { key, packet_len: fragment_data.total_size }
790        } else {
791            FragmentProcessingState::NeedMoreFragments
792        };
793
794        self.increment_size(added_bytes);
795        (result, timer_id)
796    }
797
798    /// Attempts to reassemble a packet.
799    ///
800    /// Attempts to reassemble a packet associated with a given
801    /// `FragmentCacheKey`, `key`, and cancels the timer to reset reassembly
802    /// data. The caller is expected to allocate a buffer of sufficient size
803    /// (available from `process_fragment` when it returns a
804    /// `FragmentProcessingState::Ready` value) and provide it to
805    /// `reassemble_packet` as `buffer` where the packet will be reassembled
806    /// into.
807    ///
808    /// # Panics
809    ///
810    /// Panics if the provided `buffer` does not have enough capacity for the
811    /// reassembled packet. Also panics if a different `ctx` is passed to
812    /// `reassemble_packet` from the one passed to `process_fragment` when
813    /// processing a packet with a given `key` as `reassemble_packet` will fail
814    /// to cancel the reassembly timer.
815    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
816        &mut self,
817        key: &FragmentCacheKey<I>,
818        buffer: BV,
819    ) -> Result<(), FragmentReassemblyError> {
820        let entry = match self.cache.entry(*key) {
821            Entry::Occupied(entry) => entry,
822            Entry::Vacant(_) => return Err(FragmentReassemblyError::InvalidKey),
823        };
824
825        // Make sure we are not missing fragments.
826        if !entry.get().missing_blocks.is_empty() {
827            return Err(FragmentReassemblyError::MissingFragments);
828        }
829        // Remove the entry from the cache now that we've validated that we will
830        // be able to reassemble it.
831        let (_key, data) = entry.remove_entry();
832        self.size -= data.total_size;
833
834        // If we are not missing fragments, we must have header data.
835        assert_matches!(data.header, Some(_));
836
837        // TODO(https://github.com/rust-lang/rust/issues/59278): Use
838        // `BinaryHeap::into_iter_sorted`.
839        let body_fragments = data.body_fragments.into_sorted_vec().into_iter().map(|x| x.data);
840        I::Packet::reassemble_fragmented_packet(buffer, data.header.unwrap(), body_fragments)
841            .map_err(|_| FragmentReassemblyError::PacketParsingError)
842    }
843
844    /// Gets or creates a new entry in the cache for a given `key`.
845    ///
846    /// Returns a tuple whose second component indicates whether a reassembly
847    /// timer needs to be scheduled.
848    fn get_or_create(&mut self, key: FragmentCacheKey<I>) -> (&mut FragmentCacheData, bool) {
849        match self.cache.entry(key) {
850            Entry::Occupied(e) => (e.into_mut(), false),
851            Entry::Vacant(e) => {
852                // We have no reassembly data yet so this fragment is the first
853                // one associated with the given `key`. Create a new entry in
854                // the hash table and let the caller know to schedule a timer to
855                // reset the entry.
856                (e.insert(FragmentCacheData::default()), true)
857            }
858        }
859    }
860
861    fn above_size_threshold(&self) -> bool {
862        self.size >= self.threshold
863    }
864
    /// Records `sz` additional bytes of cached fragment data.
    ///
    /// Callers are responsible for only adding data while the cache is below
    /// its size threshold; this invariant is asserted here.
    fn increment_size(&mut self, sz: usize) {
        assert!(!self.above_size_threshold());
        self.size += sz;
    }
869
870    fn remove_data(&mut self, key: &FragmentCacheKey<I>) -> Option<FragmentCacheData> {
871        let data = self.cache.remove(key)?;
872        self.size -= data.total_size;
873        Some(data)
874    }
875}
876
877/// Gets the header bytes for a packet.
878fn get_header<B: SplitByteSlice, I: IpExt>(packet: &I::Packet<B>) -> Vec<u8> {
879    match packet.as_ip_addr_ref() {
880        IpAddr::V4(packet) => packet.copy_header_bytes_for_fragment(),
881        IpAddr::V6(packet) => {
882            // We are guaranteed not to panic here because we will only panic if
883            // `packet` does not have a fragment extension header. We can only get
884            // here if `packet` is a fragment packet, so we know that `packet` has a
885            // fragment extension header.
886            packet.copy_header_bytes_for_fragment()
887        }
888    }
889}
890
/// A fragment of a packet's body.
#[derive(Debug, PartialEq, Eq)]
struct PacketBodyFragment {
    // The fragment's offset, in units of fragment blocks.
    offset: u16,
    // The fragment's body bytes.
    data: Vec<u8>,
}

impl PacketBodyFragment {
    /// Constructs a new `PacketBodyFragment` to be stored in a `BinaryHeap`.
    fn new(offset: u16, data: Vec<u8>) -> Self {
        PacketBodyFragment { offset, data }
    }
}

// Fragments are ordered primarily by their offset. The body bytes act only
// as a tie-breaker so that the ordering is consistent with the derived
// `PartialEq`/`Eq` (the `Ord` contract requires `a == b` exactly when
// `a.cmp(&b) == Ordering::Equal`; comparing only `offset` violated that for
// equal-offset fragments with differing bodies). In practice the cache never
// stores two fragments with the same offset, so reassembly order is
// unaffected.
impl PartialOrd for PacketBodyFragment {
    fn partial_cmp(&self, other: &PacketBodyFragment) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for PacketBodyFragment {
    fn cmp(&self, other: &Self) -> Ordering {
        self.offset.cmp(&other.offset).then_with(|| self.data.cmp(&other.data))
    }
}
918
919#[cfg(test)]
920mod tests {
921    use alloc::vec;
922
923    use assert_matches::assert_matches;
924    use ip_test_macro::ip_test;
925    use net_declare::{net_ip_v4, net_ip_v6};
926    use net_types::ip::{Ipv4, Ipv4Addr, Ipv6, Ipv6Addr};
927    use net_types::Witness;
928    use netstack3_base::testutil::{
929        assert_empty, FakeBindingsCtx, FakeCoreCtx, FakeInstant, FakeTimerCtxExt, TEST_ADDRS_V4,
930        TEST_ADDRS_V6,
931    };
932    use netstack3_base::{CtxPair, IntoCoreTimerCtx};
933    use packet::{Buf, ParsablePacket, ParseBuffer, Serializer};
934    use packet_formats::ip::{FragmentOffset, IpProto, Ipv6Proto};
935    use packet_formats::ipv4::Ipv4PacketBuilder;
936    use packet_formats::ipv6::{Ipv6PacketBuilder, Ipv6PacketBuilderWithFragmentHeader};
937    use test_case::test_case;
938
939    use super::*;
940
    /// Fake core context holding only the fragment cache under test.
    struct FakeFragmentContext<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
        cache: IpPacketFragmentCache<I, BT>,
    }

    impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> FakeFragmentContext<I, BC>
    where
        BC::DispatchId: From<FragmentTimerId<I>>,
    {
        /// Creates a context with an empty fragment cache whose timers are
        /// dispatched through `bindings_ctx`.
        fn new(bindings_ctx: &mut BC) -> Self {
            Self { cache: IpPacketFragmentCache::new::<IntoCoreTimerCtx>(bindings_ctx) }
        }
    }
953
    /// Test context pairing the fake core and bindings contexts below.
    type FakeCtxImpl<I> = CtxPair<FakeCoreCtxImpl<I>, FakeBindingsCtxImpl<I>>;
    /// Fake bindings context that records scheduled fragment timers.
    type FakeBindingsCtxImpl<I> = FakeBindingsCtx<FragmentTimerId<I>, (), (), ()>;
    /// Fake core context wrapping `FakeFragmentContext`.
    type FakeCoreCtxImpl<I> = FakeCoreCtx<FakeFragmentContext<I, FakeBindingsCtxImpl<I>>, (), ()>;
957
    impl<I: ReassemblyIpExt> FragmentContext<I, FakeBindingsCtxImpl<I>> for FakeCoreCtxImpl<I> {
        // Exposes the fake context's fragment cache to the handler under test.
        fn with_state_mut<
            O,
            F: FnOnce(&mut IpPacketFragmentCache<I, FakeBindingsCtxImpl<I>>) -> O,
        >(
            &mut self,
            cb: F,
        ) -> O {
            cb(&mut self.state.cache)
        }
    }
969
    /// The result `process_ipv4_fragment` or `process_ipv6_fragment` should
    /// expect after processing a fragment.
    #[derive(PartialEq)]
    enum ExpectedResult<I: ReassemblyIpExt> {
        /// After processing a packet fragment, we should be ready to reassemble
        /// the packet.
        ///
        /// `body_fragment_blocks` is in units of `FRAGMENT_BLOCK_SIZE`.
        Ready { body_fragment_blocks: u16, key: FragmentCacheKey<I> },

        /// After processing a packet fragment, we need more packet fragments
        /// before being ready to reassemble the packet.
        NeedMore,

        /// The packet fragment is invalid.
        Invalid,

        /// The cache is full, so the fragment could not be stored.
        OutOfMemory,
    }
990
    /// Get an IPv4 packet builder.
    ///
    /// Uses the standard test addresses, a TTL of 10, and the test protocol.
    fn get_ipv4_builder() -> Ipv4PacketBuilder {
        Ipv4PacketBuilder::new(
            TEST_ADDRS_V4.remote_ip,
            TEST_ADDRS_V4.local_ip,
            10,
            <Ipv4 as TestIpExt>::PROTOCOL,
        )
    }
1000
    /// Get an IPv6 packet builder.
    ///
    /// Uses the standard test addresses, a hop limit of 10, and the test
    /// protocol.
    fn get_ipv6_builder() -> Ipv6PacketBuilder {
        Ipv6PacketBuilder::new(
            TEST_ADDRS_V6.remote_ip,
            TEST_ADDRS_V6.local_ip,
            10,
            <Ipv6 as TestIpExt>::PROTOCOL,
        )
    }
1010
1011    /// Validate that IpPacketFragmentCache has correct size.
1012    fn validate_size<I: ReassemblyIpExt, BT: FragmentBindingsTypes>(
1013        cache: &IpPacketFragmentCache<I, BT>,
1014    ) {
1015        let mut sz: usize = 0;
1016
1017        for v in cache.cache.values() {
1018            sz += v.total_size;
1019        }
1020
1021        assert_eq!(sz, cache.size);
1022    }
1023
    /// Description of a single fragment used to generate test packets.
    struct FragmentSpec {
        /// The ID of the fragment.
        id: u16,
        /// The offset of the fragment, in units of `FRAGMENT_BLOCK_SIZE`.
        offset: u16,
        /// The size of the fragment, in units of `FRAGMENT_BLOCK_SIZE`.
        size: u16,
        /// The value of the M flag. "True" indicates more fragments.
        m_flag: bool,
    }
1034
1035    fn expected_packet_size<I: TestIpExt>(num_fragment_blocks: u16) -> usize {
1036        usize::from(num_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE) + I::HEADER_LENGTH
1037    }
1038
1039    /// Generates and processes an IPv4 fragment packet.
1040    fn process_ipv4_fragment<CC: FragmentContext<Ipv4, BC>, BC: FragmentBindingsContext>(
1041        core_ctx: &mut CC,
1042        bindings_ctx: &mut BC,
1043        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
1044        mut builder: Ipv4PacketBuilder,
1045        expected_result: ExpectedResult<Ipv4>,
1046    ) {
1047        builder.id(id);
1048        builder.fragment_offset(FragmentOffset::new(offset).unwrap());
1049        builder.mf_flag(m_flag);
1050        let body = generate_body_fragment(
1051            id,
1052            offset,
1053            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
1054        );
1055
1056        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1057        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1058
1059        let actual_result =
1060            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
1061        match expected_result {
1062            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
1063                let (key, packet_len) = assert_matches!(
1064                    actual_result,
1065                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
1066                );
1067                assert_eq!(key, expected_key);
1068                assert_eq!(packet_len, expected_packet_size::<Ipv4>(body_fragment_blocks));
1069            }
1070            ExpectedResult::NeedMore => {
1071                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
1072            }
1073            ExpectedResult::Invalid => {
1074                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
1075            }
1076            ExpectedResult::OutOfMemory => {
1077                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
1078            }
1079        }
1080    }
1081
1082    /// Generates and processes an IPv6 fragment packet.
1083    ///
1084    /// `fragment_offset` and `size` are both in units of `FRAGMENT_BLOCK_SIZE`.
1085    fn process_ipv6_fragment<CC: FragmentContext<Ipv6, BC>, BC: FragmentBindingsContext>(
1086        core_ctx: &mut CC,
1087        bindings_ctx: &mut BC,
1088        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
1089        builder: Ipv6PacketBuilder,
1090        expected_result: ExpectedResult<Ipv6>,
1091    ) {
1092        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
1093            builder,
1094            FragmentOffset::new(offset).unwrap(),
1095            m_flag,
1096            id.into(),
1097        );
1098
1099        let body = generate_body_fragment(
1100            id,
1101            offset,
1102            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
1103        );
1104
1105        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1106        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
1107
1108        let actual_result =
1109            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
1110        match expected_result {
1111            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
1112                let (key, packet_len) = assert_matches!(
1113                    actual_result,
1114                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
1115                );
1116                assert_eq!(key, expected_key);
1117                assert_eq!(packet_len, expected_packet_size::<Ipv6>(body_fragment_blocks));
1118            }
1119            ExpectedResult::NeedMore => {
1120                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
1121            }
1122            ExpectedResult::Invalid => {
1123                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
1124            }
1125            ExpectedResult::OutOfMemory => {
1126                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
1127            }
1128        }
1129    }
1130
    /// IP-version-specific helpers for the tests in this module.
    trait TestIpExt: IpExt + netstack3_base::testutil::TestIpExt + ReassemblyIpExt {
        /// The length of this IP version's fixed header, in bytes.
        const HEADER_LENGTH: usize;

        /// The upper-layer protocol used for generated test packets.
        const PROTOCOL: Self::Proto;

        /// Generates and processes a fragment described by `spec`, asserting
        /// that the outcome matches `expected_result`.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Self>,
        );
    }
1143
    impl TestIpExt for Ipv4 {
        const HEADER_LENGTH: usize = packet_formats::ipv4::HDR_PREFIX_LEN;

        const PROTOCOL: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);

        // Delegates to the IPv4-specific helper with the default builder.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv4>,
        ) {
            process_ipv4_fragment(core_ctx, bindings_ctx, spec, get_ipv4_builder(), expected_result)
        }
    }
    impl TestIpExt for Ipv6 {
        const HEADER_LENGTH: usize = packet_formats::ipv6::IPV6_FIXED_HDR_LEN;

        const PROTOCOL: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);

        // Delegates to the IPv6-specific helper with the default builder.
        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv6>,
        ) {
            process_ipv6_fragment(core_ctx, bindings_ctx, spec, get_ipv6_builder(), expected_result)
        }
    }
1172
1173    /// Tries to reassemble the packet with the given fragment ID.
1174    ///
1175    /// `body_fragment_blocks` is in units of `FRAGMENT_BLOCK_SIZE`.
1176    fn try_reassemble_ip_packet<
1177        I: TestIpExt + netstack3_base::IpExt,
1178        CC: FragmentContext<I, BC>,
1179        BC: FragmentBindingsContext,
1180    >(
1181        core_ctx: &mut CC,
1182        bindings_ctx: &mut BC,
1183        fragment_id: u16,
1184        body_fragment_blocks: u16,
1185    ) {
1186        let mut buffer: Vec<u8> = vec![
1187            0;
1188            usize::from(body_fragment_blocks)
1189                * usize::from(FRAGMENT_BLOCK_SIZE)
1190                + I::HEADER_LENGTH
1191        ];
1192        let mut buffer = &mut buffer[..];
1193        let key = test_key(fragment_id);
1194
1195        FragmentHandler::reassemble_packet(core_ctx, bindings_ctx, &key, &mut buffer).unwrap();
1196        let packet = I::Packet::parse_mut(&mut buffer, ()).unwrap();
1197
1198        let expected_body = generate_body_fragment(
1199            fragment_id,
1200            0,
1201            usize::from(body_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE),
1202        );
1203        assert_eq!(packet.body(), &expected_body[..]);
1204    }
1205
1206    /// Generates the body of a packet with the given fragment ID, offset, and
1207    /// length.
1208    ///
1209    /// Overlapping body bytes from different calls to `generate_body_fragment`
1210    /// are guaranteed to have the same values.
1211    fn generate_body_fragment(fragment_id: u16, fragment_offset: u16, len: usize) -> Vec<u8> {
1212        // The body contains increasing byte values which start at `fragment_id`
1213        // at byte 0. This ensures that different packets with different
1214        // fragment IDs contain bodies with different byte values.
1215        let start = usize::from(fragment_id)
1216            + usize::from(fragment_offset) * usize::from(FRAGMENT_BLOCK_SIZE);
1217        (start..start + len).map(|byte| byte as u8).collect()
1218    }
1219
    /// Gets a `FragmentCacheKey` with hard coded test values.
    fn test_key<I: TestIpExt>(id: u16) -> FragmentCacheKey<I> {
        // Wrapper carrying the IP-version-specific key component through
        // `map_ip_out`.
        #[derive(GenericOverIp)]
        #[generic_over_ip(I, Ip)]
        struct Wrapper<I: ReassemblyIpExt>(I::FragmentCacheKeyPart);

        // IPv4 cache keys include the protocol; IPv6 keys carry no extra
        // component.
        let Wrapper(ip_specific_fields) =
            I::map_ip_out((), |()| Wrapper(Ipv4::PROTOCOL), |()| Wrapper(()));

        FragmentCacheKey {
            src_ip: I::TEST_ADDRS.remote_ip.get(),
            dst_ip: I::TEST_ADDRS.local_ip.get(),
            fragment_id: id.into(),
            ip_specific_fields,
        }
    }
1236
    /// Constructs a fresh test context whose core context owns an empty
    /// fragment cache wired to the fake bindings context's timers.
    fn new_context<I: ReassemblyIpExt>() -> FakeCtxImpl<I> {
        FakeCtxImpl::<I>::with_default_bindings_ctx(|bindings_ctx| {
            FakeCoreCtxImpl::with_state(FakeFragmentContext::new(bindings_ctx))
        })
    }
1242
1243    #[test]
1244    fn test_ipv4_reassembly_not_needed() {
1245        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
1246
1247        // Test that we don't attempt reassembly if the packet is not
1248        // fragmented.
1249
1250        let builder = get_ipv4_builder();
1251        let body = [1, 2, 3, 4, 5];
1252        let mut buffer =
1253            Buf::new(body.to_vec(), ..).encapsulate(builder).serialize_vec_outer().unwrap();
1254        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1255        assert_matches!(
1256            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1257            FragmentProcessingState::NotNeeded(unfragmented) if unfragmented.body() == body
1258        );
1259    }
1260
1261    #[test]
1262    #[should_panic(
1263        expected = "internal error: entered unreachable code: Should never call this function if the packet does not have a fragment header"
1264    )]
1265    fn test_ipv6_reassembly_not_needed() {
1266        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
1267
1268        // Test that we panic if we call `fragment_data` on a packet that has no
1269        // fragment data.
1270
1271        let builder = get_ipv6_builder();
1272        let mut buffer =
1273            Buf::new(vec![1, 2, 3, 4, 5], ..).encapsulate(builder).serialize_vec_outer().unwrap();
1274        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
1275        assert_matches!(
1276            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1277            FragmentProcessingState::InvalidFragment
1278        );
1279    }
1280
1281    #[ip_test(I)]
1282    #[test_case(1)]
1283    #[test_case(10)]
1284    #[test_case(100)]
1285    fn test_ip_reassembly<I: TestIpExt>(size: u16) {
1286        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1287        let id = 5;
1288
1289        // Test that we properly reassemble fragmented packets.
1290
1291        // Process fragment #0
1292        I::process_ip_fragment(
1293            &mut core_ctx,
1294            &mut bindings_ctx,
1295            FragmentSpec { id, offset: 0, size, m_flag: true },
1296            ExpectedResult::NeedMore,
1297        );
1298
1299        // Process fragment #1
1300        I::process_ip_fragment(
1301            &mut core_ctx,
1302            &mut bindings_ctx,
1303            FragmentSpec { id, offset: size, size, m_flag: true },
1304            ExpectedResult::NeedMore,
1305        );
1306
1307        // Process fragment #2
1308        I::process_ip_fragment(
1309            &mut core_ctx,
1310            &mut bindings_ctx,
1311            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1312            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
1313        );
1314
1315        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 3 * size);
1316    }
1317
    #[test]
    fn test_ipv4_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        // Each WRONG_* constant differs from its RIGHT_* counterpart only in
        // the single key component under test.
        const RIGHT_SRC: Ipv4Addr = net_ip_v4!("192.0.2.1");
        const WRONG_SRC: Ipv4Addr = net_ip_v4!("192.0.2.2");

        const RIGHT_DST: Ipv4Addr = net_ip_v4!("192.0.2.3");
        const WRONG_DST: Ipv4Addr = net_ip_v4!("192.0.2.4");

        const RIGHT_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);
        const WRONG_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Udp);

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process fragment #0.
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 under a different key, and verify it doesn't
        // complete the packet. Each tuple below varies exactly one of the
        // four key components (proto, dst, src, id).
        for (id, src, dst, proto) in [
            (RIGHT_ID, RIGHT_SRC, RIGHT_DST, WRONG_PROTO),
            (RIGHT_ID, RIGHT_SRC, WRONG_DST, RIGHT_PROTO),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST, RIGHT_PROTO),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST, RIGHT_PROTO),
        ] {
            process_ipv4_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv4PacketBuilder::new(src, dst, TTL, proto),
                ExpectedResult::NeedMore,
            );
        }

        // Finally, process fragment #1 under the correct key, and verify the
        // packet is completed. Note that IPv4 cache keys include the protocol
        // in addition to the addresses and fragment ID.
        const KEY: FragmentCacheKey<Ipv4> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: RIGHT_PROTO,
        };
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv4>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv4Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }
1383
    #[test]
    fn test_ipv6_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        // Each WRONG_* constant differs from its RIGHT_* counterpart only in
        // the single key component under test.
        const RIGHT_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const WRONG_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::2");

        const RIGHT_DST: Ipv6Addr = net_ip_v6!("2001:0db8::3");
        const WRONG_DST: Ipv6Addr = net_ip_v6!("2001:0db8::4");

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        // Process fragment #0.
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 under a different key, and verify it doesn't
        // complete the packet. Each tuple below varies exactly one of the
        // three key components (dst, src, id); IPv6 keys carry no protocol.
        for (id, src, dst) in [
            (RIGHT_ID, RIGHT_SRC, WRONG_DST),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST),
        ] {
            process_ipv6_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv6PacketBuilder::new(src, dst, TTL, Ipv6::PROTOCOL),
                ExpectedResult::NeedMore,
            );
        }

        // Finally, process fragment #1 under the correct key, and verify the
        // packet is completed.
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }
1445
    #[test]
    fn test_ipv6_reassemble_different_protocols() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        const SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const DST: Ipv6Addr = net_ip_v6!("2001:0db8::2");
        const ID: u16 = 1;
        const TTL: u8 = 1;

        const PROTO1: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);
        const PROTO2: Ipv6Proto = Ipv6Proto::Proto(IpProto::Udp);

        // Process fragment #0 (uses `PROTO1`).
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO1),
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 (uses `PROTO2`).
        // The packet should successfully reassemble, using the protocol from
        // fragment #0 (i.e. `PROTO1`). Unlike IPv4, the IPv6 cache key does
        // not include a protocol, so both fragments match the same entry.
        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: SRC,
            dst_ip: DST,
            fragment_id: ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO2),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
        // The reassembled packet's upper-layer protocol comes from the
        // offset-0 fragment's header.
        assert_eq!(packet.proto(), PROTO1);
    }
1490
1491    #[ip_test(I)]
1492    #[test_case(1)]
1493    #[test_case(10)]
1494    #[test_case(100)]
1495    fn test_ip_reassemble_with_missing_blocks<I: TestIpExt>(size: u16) {
1496        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1497        let id = 5;
1498
1499        // Test the error we get when we attempt to reassemble with missing
1500        // fragments.
1501
1502        // Process fragment #0
1503        I::process_ip_fragment(
1504            &mut core_ctx,
1505            &mut bindings_ctx,
1506            FragmentSpec { id, offset: 0, size, m_flag: true },
1507            ExpectedResult::NeedMore,
1508        );
1509
1510        // Process fragment #2
1511        I::process_ip_fragment(
1512            &mut core_ctx,
1513            &mut bindings_ctx,
1514            FragmentSpec { id, offset: size, size, m_flag: true },
1515            ExpectedResult::NeedMore,
1516        );
1517
1518        let mut buffer: Vec<u8> = vec![0; 1];
1519        let mut buffer = &mut buffer[..];
1520        let key = test_key(id);
1521        assert_eq!(
1522            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1523                .unwrap_err(),
1524            FragmentReassemblyError::MissingFragments,
1525        );
1526    }
1527
    #[ip_test(I)]
    fn test_ip_reassemble_after_timer<I: TestIpExt>() {
        // Test that the reassembly timer clears the fragment cache: even
        // after all fragments have arrived (process returns `Ready`), letting
        // the timer fire before calling `reassemble_packet` drops the cached
        // data, so a later reassembly attempt fails with `InvalidKey`.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;
        let key = test_key::<I>(id);

        // Make sure no timers in the dispatcher yet, and the cache is empty.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Process fragment #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // The first fragment schedules the reassembly timer.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process fragment #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 1, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );
        // Make sure no new timers got added or fired.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Process fragment #2 (final): the packet is now complete...
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2, size: 1, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id) },
        );
        // ...but the reassembly timer is still pending since the packet has
        // not been reassembled yet.
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        // Trigger the timer (simulate the reassembly timeout elapsing).
        assert_eq!(
            bindings_ctx.trigger_next_timer(&mut core_ctx),
            Some(FragmentTimerId::<I>::default())
        );

        // Make sure no other timers exist and the cache was emptied.
        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        // Attempt to reassemble the packet but get an error since the
        // fragment data was cleared by the timer.
        let key = test_key(id);
        let packet_len = 44;
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::InvalidKey,
        );
    }
1608
1609    #[ip_test(I)]
1610    #[test_case(1)]
1611    #[test_case(10)]
1612    #[test_case(100)]
1613    fn test_ip_fragment_cache_oom<I: TestIpExt>(size: u16) {
1614        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1615        let mut id = 0;
1616        const THRESHOLD: usize = 8196usize;
1617
1618        assert_eq!(core_ctx.state.cache.size, 0);
1619        core_ctx.state.cache.threshold = THRESHOLD;
1620
1621        // Test that when cache size exceeds the threshold, process_fragment
1622        // returns OOM.
1623        while core_ctx.state.cache.size + usize::from(size) <= THRESHOLD {
1624            I::process_ip_fragment(
1625                &mut core_ctx,
1626                &mut bindings_ctx,
1627                FragmentSpec { id, offset: 0, size, m_flag: true },
1628                ExpectedResult::NeedMore,
1629            );
1630            validate_size(&core_ctx.state.cache);
1631            id += 1;
1632        }
1633
1634        // Now that the cache is at or above the threshold, observe OOM.
1635        I::process_ip_fragment(
1636            &mut core_ctx,
1637            &mut bindings_ctx,
1638            FragmentSpec { id, offset: 0, size, m_flag: true },
1639            ExpectedResult::OutOfMemory,
1640        );
1641        validate_size(&core_ctx.state.cache);
1642
1643        // Trigger the timers, which will clear the cache.
1644        let _timers = bindings_ctx
1645            .trigger_timers_for(I::REASSEMBLY_TIMEOUT + Duration::from_secs(1), &mut core_ctx);
1646        assert_eq!(core_ctx.state.cache.size, 0);
1647        validate_size(&core_ctx.state.cache);
1648
1649        // Can process fragments again.
1650        I::process_ip_fragment(
1651            &mut core_ctx,
1652            &mut bindings_ctx,
1653            FragmentSpec { id, offset: 0, size, m_flag: true },
1654            ExpectedResult::NeedMore,
1655        );
1656    }
1657
1658    #[ip_test(I)]
1659    #[test_case(1)]
1660    #[test_case(10)]
1661    #[test_case(100)]
1662    fn test_unordered_fragments<I: TestIpExt>(size: u16) {
1663        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1664        let id = 5;
1665
1666        // Process fragment #0
1667        I::process_ip_fragment(
1668            &mut core_ctx,
1669            &mut bindings_ctx,
1670            FragmentSpec { id, offset: 0, size, m_flag: true },
1671            ExpectedResult::NeedMore,
1672        );
1673
1674        // Process fragment #2
1675        I::process_ip_fragment(
1676            &mut core_ctx,
1677            &mut bindings_ctx,
1678            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1679            ExpectedResult::NeedMore,
1680        );
1681
1682        // Process fragment #1
1683        I::process_ip_fragment(
1684            &mut core_ctx,
1685            &mut bindings_ctx,
1686            FragmentSpec { id, offset: size, size, m_flag: true },
1687            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
1688        );
1689    }
1690
1691    #[ip_test(I)]
1692    #[test_case(1)]
1693    #[test_case(10)]
1694    #[test_case(100)]
1695    fn test_ip_duplicate_fragment<I: TestIpExt>(size: u16) {
1696        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1697        let id = 5;
1698
1699        // Process fragment #0
1700        I::process_ip_fragment(
1701            &mut core_ctx,
1702            &mut bindings_ctx,
1703            FragmentSpec { id, offset: 0, size, m_flag: true },
1704            ExpectedResult::NeedMore,
1705        );
1706
1707        // Process the exact same fragment over again. It should be ignored.
1708        I::process_ip_fragment(
1709            &mut core_ctx,
1710            &mut bindings_ctx,
1711            FragmentSpec { id, offset: 0, size, m_flag: true },
1712            ExpectedResult::NeedMore,
1713        );
1714
1715        // Verify that the fragment's cache is intact by sending the remaining
1716        // fragment.
1717        I::process_ip_fragment(
1718            &mut core_ctx,
1719            &mut bindings_ctx,
1720            FragmentSpec { id, offset: size, size, m_flag: false },
1721            ExpectedResult::Ready { body_fragment_blocks: 2 * size, key: test_key(id) },
1722        );
1723
1724        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 2 * size);
1725    }
1726
1727    #[ip_test(I)]
1728    #[test_case(1)]
1729    #[test_case(10)]
1730    #[test_case(100)]
1731    fn test_ip_out_of_bounds_fragment<I: TestIpExt>(size: u16) {
1732        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
1733        let id = 5;
1734
1735        // Process fragment #1
1736        I::process_ip_fragment(
1737            &mut core_ctx,
1738            &mut bindings_ctx,
1739            FragmentSpec { id, offset: size, size, m_flag: false },
1740            ExpectedResult::NeedMore,
1741        );
1742
1743        // Process a fragment after fragment #1. It should be deemed invalid
1744        // because fragment #1 was the end.
1745        I::process_ip_fragment(
1746            &mut core_ctx,
1747            &mut bindings_ctx,
1748            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
1749            ExpectedResult::Invalid,
1750        );
1751    }
1752
    #[ip_test(I)]
    #[test_case(50, 100; "overlaps_front")]
    #[test_case(150, 100; "overlaps_back")]
    #[test_case(50, 200; "overlaps_both")]
    fn test_ip_overlapping_fragment<I: TestIpExt>(offset: u16, size: u16) {
        // Test that a fragment overlapping an already-received fragment is
        // rejected. The test cases overlap the front, the back, or both ends
        // of the seeded fragment.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        // Seed the cache with a fragment covering blocks [100, 200).
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 100, size: 100, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process a fragment that overlaps the seeded one. It should be
        // deemed invalid.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset, size, m_flag: true },
            ExpectedResult::Invalid,
        );
    }
1778
1779    #[test]
1780    fn test_ipv4_fragment_not_multiple_of_offset_unit() {
1781        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
1782        let id = 0;
1783
1784        assert_eq!(core_ctx.state.cache.size, 0);
1785        // Test that fragment bodies must be a multiple of
1786        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.
1787
1788        // Process fragment #0
1789        process_ipv4_fragment(
1790            &mut core_ctx,
1791            &mut bindings_ctx,
1792            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
1793            get_ipv4_builder(),
1794            ExpectedResult::NeedMore,
1795        );
1796
1797        // Process fragment #1 (body size is not a multiple of
1798        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
1799        let mut builder = get_ipv4_builder();
1800        builder.id(id);
1801        builder.fragment_offset(FragmentOffset::new(1).unwrap());
1802        builder.mf_flag(true);
1803        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
1804        // multiple of `FRAGMENT_BLOCK_SIZE`.
1805        let mut body: Vec<u8> = Vec::new();
1806        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
1807        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1808        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1809        assert_matches!(
1810            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1811            FragmentProcessingState::InvalidFragment
1812        );
1813
1814        // Process fragment #1 (body size is not a multiple of
1815        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
1816        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
1817        let mut builder = get_ipv4_builder();
1818        builder.id(id);
1819        builder.fragment_offset(FragmentOffset::new(1).unwrap());
1820        builder.mf_flag(false);
1821        // Body with 1 byte less than `FRAGMENT_BLOCK_SIZE` so it is not a
1822        // multiple of `FRAGMENT_BLOCK_SIZE`.
1823        let mut body: Vec<u8> = Vec::new();
1824        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
1825        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1826        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
1827        let (key, packet_len) = assert_matches!(
1828            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1829            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
1830        );
1831        assert_eq!(key, test_key(id));
1832        assert_eq!(packet_len, 35);
1833        validate_size(&core_ctx.state.cache);
1834        let mut buffer: Vec<u8> = vec![0; packet_len];
1835        let mut buffer = &mut buffer[..];
1836        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1837            .unwrap();
1838        let packet = Ipv4Packet::parse_mut(&mut buffer, ()).unwrap();
1839        let mut expected_body: Vec<u8> = Vec::new();
1840        expected_body.extend(0..15);
1841        assert_eq!(packet.body(), &expected_body[..]);
1842        assert_eq!(core_ctx.state.cache.size, 0);
1843    }
1844
1845    #[test]
1846    fn test_ipv6_fragment_not_multiple_of_offset_unit() {
1847        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
1848        let id = 0;
1849
1850        assert_eq!(core_ctx.state.cache.size, 0);
1851        // Test that fragment bodies must be a multiple of
1852        // `FRAGMENT_BLOCK_SIZE`, except for the last fragment.
1853
1854        // Process fragment #0
1855        process_ipv6_fragment(
1856            &mut core_ctx,
1857            &mut bindings_ctx,
1858            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
1859            get_ipv6_builder(),
1860            ExpectedResult::NeedMore,
1861        );
1862
1863        // Process fragment #1 (body size is not a multiple of
1864        // `FRAGMENT_BLOCK_SIZE` and more flag is `true`).
1865        let offset = 1;
1866        let body_size: usize = (FRAGMENT_BLOCK_SIZE - 1).into();
1867        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
1868            get_ipv6_builder(),
1869            FragmentOffset::new(offset).unwrap(),
1870            true,
1871            id.into(),
1872        );
1873        let body = generate_body_fragment(id, offset, body_size);
1874        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1875        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
1876        assert_matches!(
1877            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1878            FragmentProcessingState::InvalidFragment
1879        );
1880
1881        // Process fragment #1 (body size is not a multiple of
1882        // `FRAGMENT_BLOCK_SIZE` but more flag is `false`). The last fragment is
1883        // allowed to not be a multiple of `FRAGMENT_BLOCK_SIZE`.
1884        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
1885            get_ipv6_builder(),
1886            FragmentOffset::new(offset).unwrap(),
1887            false,
1888            id.into(),
1889        );
1890        let body = generate_body_fragment(id, offset, body_size);
1891        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
1892        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
1893        let (key, packet_len) = assert_matches!(
1894            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
1895            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
1896        );
1897        assert_eq!(key, test_key(id));
1898        assert_eq!(packet_len, 55);
1899
1900        validate_size(&core_ctx.state.cache);
1901        let mut buffer: Vec<u8> = vec![0; packet_len];
1902        let mut buffer = &mut buffer[..];
1903        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
1904            .unwrap();
1905        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).unwrap();
1906        let mut expected_body: Vec<u8> = Vec::new();
1907        expected_body.extend(0..15);
1908        assert_eq!(packet.body(), &expected_body[..]);
1909        assert_eq!(core_ctx.state.cache.size, 0);
1910    }
1911
    #[ip_test(I)]
    fn test_ip_reassembly_with_multiple_intertwined_packets<I: TestIpExt>() {
        // Test that we properly reassemble fragmented packets when their
        // fragments arrive intertwined with another packet's fragments.
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;

        // Process fragment #0 for packet #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 (final) for packet #0: packet #0 is complete.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        // Process fragment #2 (final) for packet #1: packet #1 is complete.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_1) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_1, 3);
    }
1974
    #[ip_test(I)]
    fn test_ip_reassembly_timer_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;
        let id_2 = 15;

        // Test the reassembly timer with multiple intertwined packets whose
        // fragments all arrive out of order. We expect packets #0 and #2 to
        // succeed, and packet #1 to fail due to the reassembly timer.
        //
        // The flow of events:
        //   T=0:
        //     - Packet #0, Fragment #0 arrives (timer scheduled for
        //       T=TIMEOUT).
        //     - Packet #1, Fragment #2 arrives (timer scheduled for
        //       T=TIMEOUT).
        //     - Packet #2, Fragment #2 arrives (timer scheduled for
        //       T=TIMEOUT).
        //   T=BEFORE_TIMEOUT1:
        //     - Packet #0, Fragment #2 arrives.
        //   T=BEFORE_TIMEOUT2:
        //     - Packet #2, Fragment #1 arrives.
        //     - Packet #0, Fragment #1 arrives (timer cancelled since all
        //       fragments arrived); packet #0 reassembles.
        //   T=BEFORE_TIMEOUT3:
        //     - Packet #1, Fragment #0 arrives.
        //     - Packet #2, Fragment #0 arrives (timer cancelled since all
        //       fragments arrived); packet #2 reassembles.
        //   T=TIMEOUT:
        //     - Timer fires, clearing packet #1's partial state.
        //     - Another fragment for packet #1 arrives afterwards; it starts
        //       a fresh cache entry rather than completing the old one.

        const BEFORE_TIMEOUT1: Duration = Duration::from_secs(1);
        const BEFORE_TIMEOUT2: Duration = Duration::from_secs(2);
        const BEFORE_TIMEOUT3: Duration = Duration::from_secs(3);
        assert!(BEFORE_TIMEOUT1 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT2 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT3 < I::REASSEMBLY_TIMEOUT);

        // Process fragment #0 for packet #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 (final) for packet #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Process fragment #2 (final) for packet #2.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Advance time; no timers should fire yet.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT1), &mut core_ctx),
        );

        // Process fragment #2 (final) for packet #0.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        // Advance time; no timers should fire yet.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT2), &mut core_ctx),
        );

        // Process fragment #1 for packet #2.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #1 for packet #0; packet #0 is now complete.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        // Advance time; no timers should fire yet.
        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT3), &mut core_ctx),
        );

        // Process fragment #0 for packet #1.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        // Process fragment #0 for packet #2; packet #2 is now complete.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_2) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_2, 3);

        // Advance time to the timeout, triggering the timer for the reassembly
        // of packet #1.
        bindings_ctx.trigger_timers_until_and_expect_unordered(
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
            [FragmentTimerId::<I>::default()],
            &mut core_ctx,
        );

        // Make sure no other timers exist.
        bindings_ctx.timers.assert_no_timers_installed();

        // Process another fragment for packet #1 at offset 2 (this time with
        // the more-fragments flag set). The timer already cleared packet #1's
        // cache entry, so even though every block was received at some point,
        // this fragment starts a new entry and we just get `NeedMore`.
        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }
2121
2122    #[test]
2123    fn test_no_more_fragments_in_middle_of_block() {
2124        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
2125        process_ipv4_fragment(
2126            &mut core_ctx,
2127            &mut bindings_ctx,
2128            FragmentSpec { id: 0, offset: 100, size: 1, m_flag: false },
2129            get_ipv4_builder(),
2130            ExpectedResult::NeedMore,
2131        );
2132
2133        process_ipv4_fragment(
2134            &mut core_ctx,
2135            &mut bindings_ctx,
2136            FragmentSpec { id: 0, offset: 50, size: 1, m_flag: false },
2137            get_ipv4_builder(),
2138            ExpectedResult::Invalid,
2139        );
2140    }
2141
2142    #[ip_test(I)]
2143    fn test_cancel_timer_on_overlap<I: TestIpExt>() {
2144        const FRAGMENT_ID: u16 = 1;
2145
2146        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
2147
2148        let key = test_key(FRAGMENT_ID);
2149
2150        // Do this a couple times to make sure that new packets matching the
2151        // invalid packet's fragment cache key create a new entry.
2152        for _ in 0..=2 {
2153            I::process_ip_fragment(
2154                &mut core_ctx,
2155                &mut bindings_ctx,
2156                FragmentSpec { id: FRAGMENT_ID, offset: 0, size: 10, m_flag: true },
2157                ExpectedResult::NeedMore,
2158            );
2159            core_ctx
2160                .state
2161                .cache
2162                .timers
2163                .assert_timers_after(&mut bindings_ctx, [(key, (), I::REASSEMBLY_TIMEOUT)]);
2164
2165            I::process_ip_fragment(
2166                &mut core_ctx,
2167                &mut bindings_ctx,
2168                FragmentSpec { id: FRAGMENT_ID, offset: 5, size: 10, m_flag: true },
2169                ExpectedResult::Invalid,
2170            );
2171            assert_eq!(bindings_ctx.timers.timers(), [],);
2172        }
2173    }
2174}