use alloc::collections::hash_map::{Entry, HashMap};
use alloc::collections::{BTreeSet, BinaryHeap};
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::fmt::Debug;
use core::hash::Hash;
use core::time::Duration;

use assert_matches::assert_matches;
use log::debug;
use net_types::ip::{GenericOverIp, Ip, IpAddr, IpVersionMarker, Ipv4, Ipv6};
use netstack3_base::{
    CoreTimerContext, HandleableTimer, InstantBindingsTypes, IpExt, LocalTimerHeap,
    TimerBindingsTypes, TimerContext,
};
use packet::BufferViewMut;
use packet_formats::ip::{IpPacket, Ipv4Proto};
use packet_formats::ipv4::{Ipv4Header, Ipv4Packet};
use packet_formats::ipv6::ext_hdrs::Ipv6ExtensionHeaderData;
use packet_formats::ipv6::Ipv6Packet;
use zerocopy::{SplitByteSlice, SplitByteSliceMut};

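/// An IP extension trait providing the IP-version-specific behavior needed by
/// fragmented packet reassembly.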
pub trait ReassemblyIpExt: IpExt {
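    /// How long to wait for missing fragments before giving up on reassembly
    /// and discarding any partially collected state (60 seconds for IPv6, per
    /// RFC 8200; 15 seconds for IPv4, matching RFC 791's example timer).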
    const REASSEMBLY_TIMEOUT: Duration;

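    /// The IP-version-specific portion of [`FragmentCacheKey`].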
    type FragmentCacheKeyPart: Copy + Clone + Debug + Hash + PartialEq + Eq;

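    /// Extracts the IP-version-specific key part from `packet`.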
    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart;
}

impl ReassemblyIpExt for Ipv4 {
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(15);

    type FragmentCacheKeyPart = Ipv4Proto;

    fn ip_specific_key_part<B: SplitByteSlice>(
        packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        IpPacket::proto(packet)
    }
}

impl ReassemblyIpExt for Ipv6 {
    const REASSEMBLY_TIMEOUT: Duration = Duration::from_secs(60);

    type FragmentCacheKeyPart = ();

    fn ip_specific_key_part<B: SplitByteSlice>(
        _packet: &Self::Packet<B>,
    ) -> Self::FragmentCacheKeyPart {
        ()
    }
}

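/// The size, in bytes, of a single fragment block; fragment offsets are
/// expressed in units of this size.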
const FRAGMENT_BLOCK_SIZE: u8 = 8;

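/// The largest valid fragment offset: the offset field is 13 bits wide, so
/// offsets range over `0..=8191` fragment blocks.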
const MAX_FRAGMENT_BLOCKS: u16 = 8191;

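/// The maximum number of bytes of fragment data the cache may hold; once the
/// threshold is reached, new fragments are rejected with
/// [`FragmentProcessingState::OutOfMemory`].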
const MAX_FRAGMENT_CACHE_SIZE: usize = 4 * 1024 * 1024;

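/// The core execution context for IP fragment reassembly, providing mutable
/// access to the per-IP-version fragment cache.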
pub trait FragmentContext<I: Ip, BT: FragmentBindingsTypes> {
    fn with_state_mut<O, F: FnOnce(&mut IpPacketFragmentCache<I, BT>) -> O>(&mut self, cb: F) -> O;
}

pub trait FragmentBindingsTypes: TimerBindingsTypes + InstantBindingsTypes {}
impl<BT> FragmentBindingsTypes for BT where BT: TimerBindingsTypes + InstantBindingsTypes {}

pub trait FragmentBindingsContext: TimerContext + FragmentBindingsTypes {}
impl<BC> FragmentBindingsContext for BC where BC: TimerContext + FragmentBindingsTypes {}

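/// The timer ID for the per-IP-version fragment cache's reassembly timeouts.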
#[derive(Hash, Eq, PartialEq, Default, Clone, Debug, GenericOverIp)]
#[generic_over_ip(I, Ip)]
pub struct FragmentTimerId<I: Ip>(IpVersionMarker<I>);

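/// An implementation of IP packet fragment handling: feeding received
/// fragments in and reassembling completed packets out.
///
/// A rough usage sketch (illustrative only; `handler`, `bindings_ctx`, and the
/// buffer handling below are placeholders, not part of this module):
///
/// ```ignore
/// match handler.process_fragment(bindings_ctx, packet) {
///     FragmentProcessingState::Ready { key, packet_len } => {
///         // Allocate a buffer large enough for the reassembled packet and
///         // reassemble into it.
///         let mut buf = alloc::vec![0u8; packet_len];
///         let mut buf = &mut buf[..];
///         handler
///             .reassemble_packet(bindings_ctx, &key, &mut buf)
///             .expect("all fragments are present");
///     }
///     // Other variants: not fragmented, need more fragments, invalid
///     // fragment, or out of memory.
///     _ => {}
/// }
/// ```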
pub trait FragmentHandler<I: ReassemblyIpExt, BC> {
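    /// Attempts to process `packet` as a fragment, returning the resulting
    /// state of the reassembly attempt (see [`FragmentProcessingState`]).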
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket;

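    /// Attempts to reassemble the packet associated with `key` into `buffer`,
    /// which must be large enough to hold it. On success (or on a packet
    /// parsing error) the cached state for `key` is removed.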
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError>;
}

impl<I: IpExt + ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
    FragmentHandler<I, BC> for CC
{
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        bindings_ctx: &mut BC,
        packet: I::Packet<B>,
    ) -> FragmentProcessingState<I, B>
    where
        I::Packet<B>: FragmentablePacket,
    {
        self.with_state_mut(|cache| {
            let (res, timer_action) = cache.process_fragment(packet);

            if let Some(timer_action) = timer_action {
                match timer_action {
                    CacheTimerAction::CreateNewTimer(key) => {
                        assert_eq!(
                            cache.timers.schedule_after(
                                bindings_ctx,
                                key,
                                (),
                                I::REASSEMBLY_TIMEOUT,
                            ),
                            None
                        )
                    }
                    CacheTimerAction::CancelExistingTimer(key) => {
                        assert_ne!(cache.timers.cancel(bindings_ctx, &key), None)
                    }
                }
            }

            res
        })
    }

    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        bindings_ctx: &mut BC,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError> {
        self.with_state_mut(|cache| {
            let res = cache.reassemble_packet(key, buffer);

            match res {
                Ok(_) | Err(FragmentReassemblyError::PacketParsingError) => {
                    assert_matches!(cache.timers.cancel(bindings_ctx, key), Some(_));
                }
                Err(FragmentReassemblyError::InvalidKey)
                | Err(FragmentReassemblyError::MissingFragments) => {}
            }

            res
        })
    }
}

impl<I: ReassemblyIpExt, BC: FragmentBindingsContext, CC: FragmentContext<I, BC>>
    HandleableTimer<CC, BC> for FragmentTimerId<I>
{
    fn handle(self, core_ctx: &mut CC, bindings_ctx: &mut BC, _: BC::UniqueTimerId) {
        let Self(IpVersionMarker { .. }) = self;
        core_ctx.with_state_mut(|cache| {
            let Some((key, ())) = cache.timers.pop(bindings_ctx) else {
                return;
            };

            let FragmentCacheData { missing_blocks: _, body_fragments, header: _, total_size } =
                assert_matches!(cache.remove_data(&key), Some(c) => c);
            debug!(
                "reassembly for {key:?} \
                timed out with {} fragments and {total_size} bytes",
                body_fragments.len(),
            );
        });
    }
}

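/// A packet that may carry IP fragmentation data.
///
/// `fragment_data` returns the fragment's `(identification, offset, more
/// fragments flag)` triple; the IPv6 implementation panics if the packet has
/// no fragment extension header.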
pub trait FragmentablePacket {
    fn fragment_data(&self) -> (u32, u16, bool);
}

impl<B: SplitByteSlice> FragmentablePacket for Ipv4Packet<B> {
    fn fragment_data(&self) -> (u32, u16, bool) {
        (u32::from(self.id()), self.fragment_offset().into_raw(), self.mf_flag())
    }
}

impl<B: SplitByteSlice> FragmentablePacket for Ipv6Packet<B> {
    fn fragment_data(&self) -> (u32, u16, bool) {
        for ext_hdr in self.iter_extension_hdrs() {
            if let Ipv6ExtensionHeaderData::Fragment { fragment_data } = ext_hdr.data() {
                return (
                    fragment_data.identification(),
                    fragment_data.fragment_offset().into_raw(),
                    fragment_data.m_flag(),
                );
            }
        }

        unreachable!(
            "Should never call this function if the packet does not have a fragment header"
        );
    }
}

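/// The result of processing a single fragment: the packet may not be
/// fragmented at all (`NotNeeded`), the fragment may be invalid, the cache may
/// be out of memory, more fragments may still be required, or the packet may
/// now be `Ready` for reassembly under the returned key.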
#[derive(Debug)]
pub enum FragmentProcessingState<I: ReassemblyIpExt, B: SplitByteSlice> {
    NotNeeded(I::Packet<B>),

    InvalidFragment,

    NeedMoreFragments,

    OutOfMemory,

    Ready { key: FragmentCacheKey<I>, packet_len: usize },
}

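/// Possible errors when attempting to reassemble a packet: fragments are
/// still missing, no reassembly is in progress for the given key, or the
/// collected fragments failed to parse as a complete packet.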
#[derive(Debug, PartialEq, Eq)]
pub enum FragmentReassemblyError {
    MissingFragments,

    InvalidKey,

    PacketParsingError,
}

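/// The key identifying an in-progress reassembly: the source and destination
/// addresses, the fragment identification value, and any IP-version-specific
/// fields (the protocol, for IPv4).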
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct FragmentCacheKey<I: ReassemblyIpExt> {
    src_ip: I::Addr,
    dst_ip: I::Addr,
    fragment_id: u32,
    ip_specific_fields: I::FragmentCacheKeyPart,
}

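/// An inclusive range of fragment blocks, `start..=end`.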
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
struct BlockRange {
    start: u16,
    end: u16,
}

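/// The in-progress reassembly state for a single packet: the block ranges
/// still missing, the body fragments received so far (kept ordered by
/// offset), the header copied from the fragment at offset 0, and the total
/// number of bytes currently buffered for this packet.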
#[derive(Debug)]
struct FragmentCacheData {
    missing_blocks: BTreeSet<BlockRange>,

    body_fragments: BinaryHeap<PacketBodyFragment>,

    header: Option<Vec<u8>>,

    total_size: usize,
}

impl Default for FragmentCacheData {
    fn default() -> FragmentCacheData {
        FragmentCacheData {
            missing_blocks: core::iter::once(BlockRange { start: 0, end: u16::MAX }).collect(),
            body_fragments: BinaryHeap::new(),
            header: None,
            total_size: 0,
        }
    }
}

impl FragmentCacheData {
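    /// Finds the missing-block gap that the given range falls into, if any,
    /// distinguishing a range fully contained in a gap from one that overlaps
    /// a gap boundary, duplicates already-received data, or lies beyond the
    /// end of the packet.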
    fn find_gap(&self, BlockRange { start, end }: BlockRange) -> FindGapResult {
        let result = self.missing_blocks.iter().find_map(|gap| {
            if gap.start <= start && gap.end >= end {
                return Some(FindGapResult::Found { gap: *gap });
            }

            if gap.start > end || gap.end < start {
                return None;
            }

            return Some(FindGapResult::Overlap);
        });

        match result {
            Some(result) => result,
            None => {
                let last = self.body_fragments.peek().unwrap();
                if last.offset < start {
                    FindGapResult::OutOfBounds
                } else {
                    FindGapResult::Duplicate
                }
            }
        }
    }
}

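/// The outcome of searching [`FragmentCacheData::missing_blocks`] for a
/// received fragment's block range.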
enum FindGapResult {
    Found {
        gap: BlockRange,
    },
    Overlap,
    OutOfBounds,
    Duplicate,
}

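/// A cache of in-progress packet reassemblies, bounded in size, keyed by
/// [`FragmentCacheKey`], and backed by a timer per entry to expire stale
/// state.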
#[derive(Debug)]
pub struct IpPacketFragmentCache<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
    cache: HashMap<FragmentCacheKey<I>, FragmentCacheData>,
    size: usize,
    threshold: usize,
    timers: LocalTimerHeap<FragmentCacheKey<I>, (), BT>,
}

impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> IpPacketFragmentCache<I, BC> {
    pub fn new<CC: CoreTimerContext<FragmentTimerId<I>, BC>>(
        bindings_ctx: &mut BC,
    ) -> IpPacketFragmentCache<I, BC> {
        IpPacketFragmentCache {
            cache: HashMap::new(),
            size: 0,
            threshold: MAX_FRAGMENT_CACHE_SIZE,
            timers: LocalTimerHeap::new(bindings_ctx, CC::convert_timer(Default::default())),
        }
    }
}

enum CacheTimerAction<I: ReassemblyIpExt> {
    CreateNewTimer(FragmentCacheKey<I>),
    CancelExistingTimer(FragmentCacheKey<I>),
}

impl<I: ReassemblyIpExt, BT: FragmentBindingsTypes> IpPacketFragmentCache<I, BT> {
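    /// Processes `packet` as a fragment, updating the cache and reporting
    /// whether a reassembly timer needs to be created or cancelled for its
    /// key.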
    fn process_fragment<B: SplitByteSlice>(
        &mut self,
        packet: I::Packet<B>,
    ) -> (FragmentProcessingState<I, B>, Option<CacheTimerAction<I>>)
    where
        I::Packet<B>: FragmentablePacket,
    {
        if self.above_size_threshold() {
            return (FragmentProcessingState::OutOfMemory, None);
        }

        let (id, offset, m_flag) = packet.fragment_data();

        if offset == 0 && !m_flag {
            return (FragmentProcessingState::NotNeeded(packet), None);
        }

        if packet.body().is_empty() {
            return (FragmentProcessingState::NeedMoreFragments, None);
        }

        if m_flag && (packet.body().len() % (FRAGMENT_BLOCK_SIZE as usize) != 0) {
            return (FragmentProcessingState::InvalidFragment, None);
        }

        let key = FragmentCacheKey {
            src_ip: packet.src_ip(),
            dst_ip: packet.dst_ip(),
            fragment_id: id,
            ip_specific_fields: I::ip_specific_key_part(&packet),
        };

        let num_fragment_blocks = 1 + ((packet.body().len() - 1) / (FRAGMENT_BLOCK_SIZE as usize));
        assert!(num_fragment_blocks > 0);

        let fragment_blocks_range =
            if let Ok(offset_end) = u16::try_from((offset as usize) + num_fragment_blocks - 1) {
                if offset_end <= MAX_FRAGMENT_BLOCKS {
                    BlockRange { start: offset, end: offset_end }
                } else {
                    return (FragmentProcessingState::InvalidFragment, None);
                }
            } else {
                return (FragmentProcessingState::InvalidFragment, None);
            };

        let (fragment_data, timer_not_yet_scheduled) = self.get_or_create(key);

        let found_gap = match fragment_data.find_gap(fragment_blocks_range) {
            FindGapResult::Overlap | FindGapResult::OutOfBounds => {
                assert_matches!(self.remove_data(&key), Some(_));

                return (
                    FragmentProcessingState::InvalidFragment,
                    (!timer_not_yet_scheduled)
                        .then_some(CacheTimerAction::CancelExistingTimer(key)),
                );
            }
            FindGapResult::Duplicate => {
                return (FragmentProcessingState::NeedMoreFragments, None);
            }
            FindGapResult::Found { gap } => gap,
        };

        let timer_id = timer_not_yet_scheduled.then_some(CacheTimerAction::CreateNewTimer(key));

        assert!(fragment_data.missing_blocks.remove(&found_gap));

        if found_gap.start < fragment_blocks_range.start {
            assert!(fragment_data.missing_blocks.insert(BlockRange {
                start: found_gap.start,
                end: fragment_blocks_range.start - 1
            }));
        }

        if found_gap.end > fragment_blocks_range.end && m_flag {
            assert!(fragment_data
                .missing_blocks
                .insert(BlockRange { start: fragment_blocks_range.end + 1, end: found_gap.end }));
        } else if found_gap.end > fragment_blocks_range.end && !m_flag && found_gap.end < u16::MAX {
            return (FragmentProcessingState::InvalidFragment, timer_id);
        } else {
            assert!(
                found_gap.end == fragment_blocks_range.end
                    || (!m_flag && found_gap.end == u16::MAX),
                "found_gap: {:?}, fragment_blocks_range: {:?} offset: {:?}, m_flag: {:?}",
                found_gap,
                fragment_blocks_range,
                offset,
                m_flag
            );
        }

        let mut added_bytes = 0;
        if offset == 0 {
            assert_eq!(fragment_data.header, None);
            let header = get_header::<B, I>(&packet);
            added_bytes = header.len();
            fragment_data.header = Some(header);
        }

        let mut body = Vec::with_capacity(packet.body().len());
        body.extend_from_slice(packet.body());
        added_bytes += body.len();
        fragment_data.total_size += added_bytes;
        fragment_data.body_fragments.push(PacketBodyFragment::new(offset, body));

        let result = if fragment_data.missing_blocks.is_empty() {
            FragmentProcessingState::Ready { key, packet_len: fragment_data.total_size }
        } else {
            FragmentProcessingState::NeedMoreFragments
        };

        self.increment_size(added_bytes);
        (result, timer_id)
    }

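    /// Attempts to reassemble the packet with `key` into `buffer`, removing
    /// the cached state once all fragments are present.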
    fn reassemble_packet<B: SplitByteSliceMut, BV: BufferViewMut<B>>(
        &mut self,
        key: &FragmentCacheKey<I>,
        buffer: BV,
    ) -> Result<(), FragmentReassemblyError> {
        let entry = match self.cache.entry(*key) {
            Entry::Occupied(entry) => entry,
            Entry::Vacant(_) => return Err(FragmentReassemblyError::InvalidKey),
        };

        if !entry.get().missing_blocks.is_empty() {
            return Err(FragmentReassemblyError::MissingFragments);
        }
        let (_key, data) = entry.remove_entry();
        self.size -= data.total_size;

        assert_matches!(data.header, Some(_));

        let body_fragments = data.body_fragments.into_sorted_vec().into_iter().map(|x| x.data);
        I::Packet::reassemble_fragmented_packet(buffer, data.header.unwrap(), body_fragments)
            .map_err(|_| FragmentReassemblyError::PacketParsingError)
    }

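    /// Gets the cache entry for `key`, creating it if it does not exist; the
    /// returned boolean is true when a new entry was created (and a timer
    /// therefore still needs to be scheduled).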
    fn get_or_create(&mut self, key: FragmentCacheKey<I>) -> (&mut FragmentCacheData, bool) {
        match self.cache.entry(key) {
            Entry::Occupied(e) => (e.into_mut(), false),
            Entry::Vacant(e) => {
                (e.insert(FragmentCacheData::default()), true)
            }
        }
    }

    fn above_size_threshold(&self) -> bool {
        self.size >= self.threshold
    }

    fn increment_size(&mut self, sz: usize) {
        assert!(!self.above_size_threshold());
        self.size += sz;
    }

    fn remove_data(&mut self, key: &FragmentCacheKey<I>) -> Option<FragmentCacheData> {
        let data = self.cache.remove(key)?;
        self.size -= data.total_size;
        Some(data)
    }
}

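/// Copies the header bytes of `packet` that should be carried over to the
/// reassembled packet.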
fn get_header<B: SplitByteSlice, I: IpExt>(packet: &I::Packet<B>) -> Vec<u8> {
    match packet.as_ip_addr_ref() {
        IpAddr::V4(packet) => packet.copy_header_bytes_for_fragment(),
        IpAddr::V6(packet) => {
            packet.copy_header_bytes_for_fragment()
        }
    }
}

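/// A received fragment body, tagged with its fragment offset.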
#[derive(Debug, PartialEq, Eq)]
struct PacketBodyFragment {
    offset: u16,
    data: Vec<u8>,
}

impl PacketBodyFragment {
    fn new(offset: u16, data: Vec<u8>) -> Self {
        PacketBodyFragment { offset, data }
    }
}

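// Fragments are ordered by offset so that `BinaryHeap::into_sorted_vec` yields
// the bodies in the order they appear in the reassembled packet.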
impl PartialOrd for PacketBodyFragment {
    fn partial_cmp(&self, other: &PacketBodyFragment) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for PacketBodyFragment {
    fn cmp(&self, other: &Self) -> Ordering {
        self.offset.cmp(&other.offset)
    }
}

#[cfg(test)]
mod tests {
    use alloc::vec;

    use assert_matches::assert_matches;
    use ip_test_macro::ip_test;
    use net_declare::{net_ip_v4, net_ip_v6};
    use net_types::ip::{Ipv4, Ipv4Addr, Ipv6, Ipv6Addr};
    use net_types::Witness;
    use netstack3_base::testutil::{
        assert_empty, FakeBindingsCtx, FakeCoreCtx, FakeInstant, FakeTimerCtxExt, TEST_ADDRS_V4,
        TEST_ADDRS_V6,
    };
    use netstack3_base::{CtxPair, IntoCoreTimerCtx};
    use packet::{Buf, ParsablePacket, ParseBuffer, Serializer};
    use packet_formats::ip::{FragmentOffset, IpProto, Ipv6Proto};
    use packet_formats::ipv4::Ipv4PacketBuilder;
    use packet_formats::ipv6::{Ipv6PacketBuilder, Ipv6PacketBuilderWithFragmentHeader};
    use test_case::test_case;

    use super::*;

    struct FakeFragmentContext<I: ReassemblyIpExt, BT: FragmentBindingsTypes> {
        cache: IpPacketFragmentCache<I, BT>,
    }

    impl<I: ReassemblyIpExt, BC: FragmentBindingsContext> FakeFragmentContext<I, BC>
    where
        BC::DispatchId: From<FragmentTimerId<I>>,
    {
        fn new(bindings_ctx: &mut BC) -> Self {
            Self { cache: IpPacketFragmentCache::new::<IntoCoreTimerCtx>(bindings_ctx) }
        }
    }

    type FakeCtxImpl<I> = CtxPair<FakeCoreCtxImpl<I>, FakeBindingsCtxImpl<I>>;
    type FakeBindingsCtxImpl<I> = FakeBindingsCtx<FragmentTimerId<I>, (), (), ()>;
    type FakeCoreCtxImpl<I> = FakeCoreCtx<FakeFragmentContext<I, FakeBindingsCtxImpl<I>>, (), ()>;

    impl<I: ReassemblyIpExt> FragmentContext<I, FakeBindingsCtxImpl<I>> for FakeCoreCtxImpl<I> {
        fn with_state_mut<
            O,
            F: FnOnce(&mut IpPacketFragmentCache<I, FakeBindingsCtxImpl<I>>) -> O,
        >(
            &mut self,
            cb: F,
        ) -> O {
            cb(&mut self.state.cache)
        }
    }

    #[derive(PartialEq)]
    enum ExpectedResult<I: ReassemblyIpExt> {
        Ready { body_fragment_blocks: u16, key: FragmentCacheKey<I> },

        NeedMore,

        Invalid,

        OutOfMemory,
    }

    fn get_ipv4_builder() -> Ipv4PacketBuilder {
        Ipv4PacketBuilder::new(
            TEST_ADDRS_V4.remote_ip,
            TEST_ADDRS_V4.local_ip,
            10,
            <Ipv4 as TestIpExt>::PROTOCOL,
        )
    }

    fn get_ipv6_builder() -> Ipv6PacketBuilder {
        Ipv6PacketBuilder::new(
            TEST_ADDRS_V6.remote_ip,
            TEST_ADDRS_V6.local_ip,
            10,
            <Ipv6 as TestIpExt>::PROTOCOL,
        )
    }

    fn validate_size<I: ReassemblyIpExt, BT: FragmentBindingsTypes>(
        cache: &IpPacketFragmentCache<I, BT>,
    ) {
        let mut sz: usize = 0;

        for v in cache.cache.values() {
            sz += v.total_size;
        }

        assert_eq!(sz, cache.size);
    }

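    /// A description of a fragment to synthesize: its identification value,
    /// its offset and size in fragment blocks, and its more-fragments flag.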
    struct FragmentSpec {
        id: u16,
        offset: u16,
        size: u16,
        m_flag: bool,
    }

    fn expected_packet_size<I: TestIpExt>(num_fragment_blocks: u16) -> usize {
        usize::from(num_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE) + I::HEADER_LENGTH
    }

    fn process_ipv4_fragment<CC: FragmentContext<Ipv4, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        mut builder: Ipv4PacketBuilder,
        expected_result: ExpectedResult<Ipv4>,
    ) {
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(offset).unwrap());
        builder.mf_flag(m_flag);
        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv4>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }

    fn process_ipv6_fragment<CC: FragmentContext<Ipv6, BC>, BC: FragmentBindingsContext>(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        FragmentSpec { id, offset, size, m_flag }: FragmentSpec,
        builder: Ipv6PacketBuilder,
        expected_result: ExpectedResult<Ipv6>,
    ) {
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            builder,
            FragmentOffset::new(offset).unwrap(),
            m_flag,
            id.into(),
        );

        let body = generate_body_fragment(
            id,
            offset,
            usize::from(size) * usize::from(FRAGMENT_BLOCK_SIZE),
        );

        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();

        let actual_result =
            FragmentHandler::process_fragment::<&[u8]>(core_ctx, bindings_ctx, packet);
        match expected_result {
            ExpectedResult::Ready { body_fragment_blocks, key: expected_key } => {
                let (key, packet_len) = assert_matches!(
                    actual_result,
                    FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
                );
                assert_eq!(key, expected_key);
                assert_eq!(packet_len, expected_packet_size::<Ipv6>(body_fragment_blocks));
            }
            ExpectedResult::NeedMore => {
                assert_matches!(actual_result, FragmentProcessingState::NeedMoreFragments);
            }
            ExpectedResult::Invalid => {
                assert_matches!(actual_result, FragmentProcessingState::InvalidFragment);
            }
            ExpectedResult::OutOfMemory => {
                assert_matches!(actual_result, FragmentProcessingState::OutOfMemory);
            }
        }
    }

    trait TestIpExt: IpExt + netstack3_base::testutil::TestIpExt + ReassemblyIpExt {
        const HEADER_LENGTH: usize;

        const PROTOCOL: Self::Proto;

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Self>,
        );
    }

    impl TestIpExt for Ipv4 {
        const HEADER_LENGTH: usize = packet_formats::ipv4::HDR_PREFIX_LEN;

        const PROTOCOL: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv4>,
        ) {
            process_ipv4_fragment(core_ctx, bindings_ctx, spec, get_ipv4_builder(), expected_result)
        }
    }
    impl TestIpExt for Ipv6 {
        const HEADER_LENGTH: usize = packet_formats::ipv6::IPV6_FIXED_HDR_LEN;

        const PROTOCOL: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);

        fn process_ip_fragment<CC: FragmentContext<Self, BC>, BC: FragmentBindingsContext>(
            core_ctx: &mut CC,
            bindings_ctx: &mut BC,
            spec: FragmentSpec,
            expected_result: ExpectedResult<Ipv6>,
        ) {
            process_ipv6_fragment(core_ctx, bindings_ctx, spec, get_ipv6_builder(), expected_result)
        }
    }

    fn try_reassemble_ip_packet<
        I: TestIpExt + netstack3_base::IpExt,
        CC: FragmentContext<I, BC>,
        BC: FragmentBindingsContext,
    >(
        core_ctx: &mut CC,
        bindings_ctx: &mut BC,
        fragment_id: u16,
        body_fragment_blocks: u16,
    ) {
        let mut buffer: Vec<u8> = vec![
            0;
            usize::from(body_fragment_blocks)
                * usize::from(FRAGMENT_BLOCK_SIZE)
                + I::HEADER_LENGTH
        ];
        let mut buffer = &mut buffer[..];
        let key = test_key(fragment_id);

        FragmentHandler::reassemble_packet(core_ctx, bindings_ctx, &key, &mut buffer).unwrap();
        let packet = I::Packet::parse_mut(&mut buffer, ()).unwrap();

        let expected_body = generate_body_fragment(
            fragment_id,
            0,
            usize::from(body_fragment_blocks) * usize::from(FRAGMENT_BLOCK_SIZE),
        );
        assert_eq!(packet.body(), &expected_body[..]);
    }

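    /// Generates a deterministic body for the fragment with `fragment_id` at
    /// `fragment_offset`, so reassembled bodies can be checked against the
    /// same generator.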
    fn generate_body_fragment(fragment_id: u16, fragment_offset: u16, len: usize) -> Vec<u8> {
        let start = usize::from(fragment_id)
            + usize::from(fragment_offset) * usize::from(FRAGMENT_BLOCK_SIZE);
        (start..start + len).map(|byte| byte as u8).collect()
    }

    fn test_key<I: TestIpExt>(id: u16) -> FragmentCacheKey<I> {
        #[derive(GenericOverIp)]
        #[generic_over_ip(I, Ip)]
        struct Wrapper<I: ReassemblyIpExt>(I::FragmentCacheKeyPart);

        let Wrapper(ip_specific_fields) =
            I::map_ip_out((), |()| Wrapper(Ipv4::PROTOCOL), |()| Wrapper(()));

        FragmentCacheKey {
            src_ip: I::TEST_ADDRS.remote_ip.get(),
            dst_ip: I::TEST_ADDRS.local_ip.get(),
            fragment_id: id.into(),
            ip_specific_fields,
        }
    }

    fn new_context<I: ReassemblyIpExt>() -> FakeCtxImpl<I> {
        FakeCtxImpl::<I>::with_default_bindings_ctx(|bindings_ctx| {
            FakeCoreCtxImpl::with_state(FakeFragmentContext::new(bindings_ctx))
        })
    }

    #[test]
    fn test_ipv4_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        let builder = get_ipv4_builder();
        let body = [1, 2, 3, 4, 5];
        let mut buffer =
            Buf::new(body.to_vec(), ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::NotNeeded(unfragmented) if unfragmented.body() == body
        );
    }

    #[test]
    #[should_panic(
        expected = "internal error: entered unreachable code: Should never call this function if the packet does not have a fragment header"
    )]
    fn test_ipv6_reassembly_not_needed() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        let builder = get_ipv6_builder();
        let mut buffer =
            Buf::new(vec![1, 2, 3, 4, 5], ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_reassembly<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 3 * size);
    }

    #[test]
    fn test_ipv4_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();

        const RIGHT_SRC: Ipv4Addr = net_ip_v4!("192.0.2.1");
        const WRONG_SRC: Ipv4Addr = net_ip_v4!("192.0.2.2");

        const RIGHT_DST: Ipv4Addr = net_ip_v4!("192.0.2.3");
        const WRONG_DST: Ipv4Addr = net_ip_v4!("192.0.2.4");

        const RIGHT_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Tcp);
        const WRONG_PROTO: Ipv4Proto = Ipv4Proto::Proto(IpProto::Udp);

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::NeedMore,
        );

        for (id, src, dst, proto) in [
            (RIGHT_ID, RIGHT_SRC, RIGHT_DST, WRONG_PROTO),
            (RIGHT_ID, RIGHT_SRC, WRONG_DST, RIGHT_PROTO),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST, RIGHT_PROTO),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST, RIGHT_PROTO),
        ] {
            process_ipv4_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv4PacketBuilder::new(src, dst, TTL, proto),
                ExpectedResult::NeedMore,
            );
        }

        const KEY: FragmentCacheKey<Ipv4> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: RIGHT_PROTO,
        };
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv4PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, RIGHT_PROTO),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv4>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv4Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }

    #[test]
    fn test_ipv6_key_uniqueness() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        const RIGHT_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const WRONG_SRC: Ipv6Addr = net_ip_v6!("2001:0db8::2");

        const RIGHT_DST: Ipv6Addr = net_ip_v6!("2001:0db8::3");
        const WRONG_DST: Ipv6Addr = net_ip_v6!("2001:0db8::4");

        const RIGHT_ID: u16 = 1;
        const WRONG_ID: u16 = 2;

        const TTL: u8 = 1;

        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::NeedMore,
        );

        for (id, src, dst) in [
            (RIGHT_ID, RIGHT_SRC, WRONG_DST),
            (RIGHT_ID, WRONG_SRC, RIGHT_DST),
            (WRONG_ID, RIGHT_SRC, RIGHT_DST),
        ] {
            process_ipv6_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 1, size: 1, m_flag: false },
                Ipv6PacketBuilder::new(src, dst, TTL, Ipv6::PROTOCOL),
                ExpectedResult::NeedMore,
            );
        }

        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: RIGHT_SRC,
            dst_ip: RIGHT_DST,
            fragment_id: RIGHT_ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: RIGHT_ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(RIGHT_SRC, RIGHT_DST, TTL, Ipv6::PROTOCOL),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let _packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
    }

    #[test]
    fn test_ipv6_reassemble_different_protocols() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();

        const SRC: Ipv6Addr = net_ip_v6!("2001:0db8::1");
        const DST: Ipv6Addr = net_ip_v6!("2001:0db8::2");
        const ID: u16 = 1;
        const TTL: u8 = 1;

        const PROTO1: Ipv6Proto = Ipv6Proto::Proto(IpProto::Tcp);
        const PROTO2: Ipv6Proto = Ipv6Proto::Proto(IpProto::Udp);

        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 0, size: 1, m_flag: true },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO1),
            ExpectedResult::NeedMore,
        );

        const KEY: FragmentCacheKey<Ipv6> = FragmentCacheKey {
            src_ip: SRC,
            dst_ip: DST,
            fragment_id: ID as u32,
            ip_specific_fields: (),
        };
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: ID, offset: 1, size: 1, m_flag: false },
            Ipv6PacketBuilder::new(SRC, DST, TTL, PROTO2),
            ExpectedResult::Ready { body_fragment_blocks: 2, key: KEY },
        );
        let mut buffer: Vec<u8> = vec![0; expected_packet_size::<Ipv6>(2)];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &KEY, &mut buffer)
            .expect("reassembly should succeed");
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).expect("parse should succeed");
        assert_eq!(packet.proto(), PROTO1);
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_reassemble_with_missing_blocks<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        let mut buffer: Vec<u8> = vec![0; 1];
        let mut buffer = &mut buffer[..];
        let key = test_key(id);
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::MissingFragments,
        );
    }

    #[ip_test(I)]
    fn test_ip_reassemble_after_timer<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;
        let key = test_key::<I>(id);

        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );

        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 1, size: 1, m_flag: true },
            ExpectedResult::NeedMore,
        );
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2, size: 1, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id) },
        );
        core_ctx.state.cache.timers.assert_timers([(
            key,
            (),
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
        )]);
        validate_size(&core_ctx.state.cache);

        assert_eq!(
            bindings_ctx.trigger_next_timer(&mut core_ctx),
            Some(FragmentTimerId::<I>::default())
        );

        bindings_ctx.timers.assert_no_timers_installed();
        assert_eq!(core_ctx.state.cache.size, 0);

        let key = test_key(id);
        let packet_len = 44;
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        assert_eq!(
            FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
                .unwrap_err(),
            FragmentReassemblyError::InvalidKey,
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_fragment_cache_oom<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let mut id = 0;
        const THRESHOLD: usize = 8196usize;

        assert_eq!(core_ctx.state.cache.size, 0);
        core_ctx.state.cache.threshold = THRESHOLD;

        while core_ctx.state.cache.size + usize::from(size) <= THRESHOLD {
            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id, offset: 0, size, m_flag: true },
                ExpectedResult::NeedMore,
            );
            validate_size(&core_ctx.state.cache);
            id += 1;
        }

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::OutOfMemory,
        );
        validate_size(&core_ctx.state.cache);

        let _timers = bindings_ctx
            .trigger_timers_for(I::REASSEMBLY_TIMEOUT + Duration::from_secs(1), &mut core_ctx);
        assert_eq!(core_ctx.state.cache.size, 0);
        validate_size(&core_ctx.state.cache);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_unordered_fragments<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3 * size, key: test_key(id) },
        );
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_duplicate_fragment<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 2 * size, key: test_key(id) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id, 2 * size);
    }

    #[ip_test(I)]
    #[test_case(1)]
    #[test_case(10)]
    #[test_case(100)]
    fn test_ip_out_of_bounds_fragment<I: TestIpExt>(size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: size, size, m_flag: false },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 2 * size, size, m_flag: false },
            ExpectedResult::Invalid,
        );
    }

    #[ip_test(I)]
    #[test_case(50, 100; "overlaps_front")]
    #[test_case(150, 100; "overlaps_back")]
    #[test_case(50, 200; "overlaps_both")]
    fn test_ip_overlapping_fragment<I: TestIpExt>(offset: u16, size: u16) {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        let id = 5;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 100, size: 100, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset, size, m_flag: true },
            ExpectedResult::Invalid,
        );
    }

    #[test]
    fn test_ipv4_fragment_not_multiple_of_offset_unit() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv4_builder(),
            ExpectedResult::NeedMore,
        );

        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(true);
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        let mut builder = get_ipv4_builder();
        builder.id(id);
        builder.fragment_offset(FragmentOffset::new(1).unwrap());
        builder.mf_flag(false);
        let mut body: Vec<u8> = Vec::new();
        body.extend(FRAGMENT_BLOCK_SIZE..FRAGMENT_BLOCK_SIZE * 2 - 1);
        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv4Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        assert_eq!(packet_len, 35);
        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv4Packet::parse_mut(&mut buffer, ()).unwrap();
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        assert_eq!(core_ctx.state.cache.size, 0);
    }

    #[test]
    fn test_ipv6_fragment_not_multiple_of_offset_unit() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv6>();
        let id = 0;

        assert_eq!(core_ctx.state.cache.size, 0);
        process_ipv6_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id, offset: 0, size: 1, m_flag: true },
            get_ipv6_builder(),
            ExpectedResult::NeedMore,
        );

        let offset = 1;
        let body_size: usize = (FRAGMENT_BLOCK_SIZE - 1).into();
        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            true,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::InvalidFragment
        );

        let builder = Ipv6PacketBuilderWithFragmentHeader::new(
            get_ipv6_builder(),
            FragmentOffset::new(offset).unwrap(),
            false,
            id.into(),
        );
        let body = generate_body_fragment(id, offset, body_size);
        let mut buffer = Buf::new(body, ..).encapsulate(builder).serialize_vec_outer().unwrap();
        let packet = buffer.parse::<Ipv6Packet<_>>().unwrap();
        let (key, packet_len) = assert_matches!(
            FragmentHandler::process_fragment::<&[u8]>(&mut core_ctx, &mut bindings_ctx, packet),
            FragmentProcessingState::Ready {key, packet_len} => (key, packet_len)
        );
        assert_eq!(key, test_key(id));
        assert_eq!(packet_len, 55);

        validate_size(&core_ctx.state.cache);
        let mut buffer: Vec<u8> = vec![0; packet_len];
        let mut buffer = &mut buffer[..];
        FragmentHandler::reassemble_packet(&mut core_ctx, &mut bindings_ctx, &key, &mut buffer)
            .unwrap();
        let packet = Ipv6Packet::parse_mut(&mut buffer, ()).unwrap();
        let mut expected_body: Vec<u8> = Vec::new();
        expected_body.extend(0..15);
        assert_eq!(packet.body(), &expected_body[..]);
        assert_eq!(core_ctx.state.cache.size, 0);
    }

    #[ip_test(I)]
    fn test_ip_reassembly_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_1) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_1, 3);
    }

    #[ip_test(I)]
    fn test_ip_reassembly_timer_with_multiple_intertwined_packets<I: TestIpExt>() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();
        const SIZE: u16 = 1;
        let id_0 = 5;
        let id_1 = 10;
        let id_2 = 15;

        const BEFORE_TIMEOUT1: Duration = Duration::from_secs(1);
        const BEFORE_TIMEOUT2: Duration = Duration::from_secs(2);
        const BEFORE_TIMEOUT3: Duration = Duration::from_secs(3);
        assert!(BEFORE_TIMEOUT1 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT2 < I::REASSEMBLY_TIMEOUT);
        assert!(BEFORE_TIMEOUT3 < I::REASSEMBLY_TIMEOUT);

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT1), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 2, size: SIZE, m_flag: false },
            ExpectedResult::NeedMore,
        );

        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT2), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_0, offset: 1, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_0) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_0, 3);

        assert_empty(
            bindings_ctx
                .trigger_timers_until_instant(FakeInstant::from(BEFORE_TIMEOUT3), &mut core_ctx),
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_2, offset: 0, size: SIZE, m_flag: true },
            ExpectedResult::Ready { body_fragment_blocks: 3, key: test_key(id_2) },
        );

        try_reassemble_ip_packet(&mut core_ctx, &mut bindings_ctx, id_2, 3);

        bindings_ctx.trigger_timers_until_and_expect_unordered(
            FakeInstant::from(I::REASSEMBLY_TIMEOUT),
            [FragmentTimerId::<I>::default()],
            &mut core_ctx,
        );

        bindings_ctx.timers.assert_no_timers_installed();

        I::process_ip_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: id_1, offset: 2, size: SIZE, m_flag: true },
            ExpectedResult::NeedMore,
        );
    }

    #[test]
    fn test_no_more_fragments_in_middle_of_block() {
        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<Ipv4>();
        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: 0, offset: 100, size: 1, m_flag: false },
            get_ipv4_builder(),
            ExpectedResult::NeedMore,
        );

        process_ipv4_fragment(
            &mut core_ctx,
            &mut bindings_ctx,
            FragmentSpec { id: 0, offset: 50, size: 1, m_flag: false },
            get_ipv4_builder(),
            ExpectedResult::Invalid,
        );
    }

    #[ip_test(I)]
    fn test_cancel_timer_on_overlap<I: TestIpExt>() {
        const FRAGMENT_ID: u16 = 1;

        let FakeCtxImpl { mut core_ctx, mut bindings_ctx } = new_context::<I>();

        let key = test_key(FRAGMENT_ID);

        for _ in 0..=2 {
            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id: FRAGMENT_ID, offset: 0, size: 10, m_flag: true },
                ExpectedResult::NeedMore,
            );
            core_ctx
                .state
                .cache
                .timers
                .assert_timers_after(&mut bindings_ctx, [(key, (), I::REASSEMBLY_TIMEOUT)]);

            I::process_ip_fragment(
                &mut core_ctx,
                &mut bindings_ctx,
                FragmentSpec { id: FRAGMENT_ID, offset: 5, size: 10, m_flag: true },
                ExpectedResult::Invalid,
            );
            assert_eq!(bindings_ctx.timers.timers(), [],);
        }
    }
}