use crate::crypto;
use crate::dir::InlineDentry;
use crate::reader::{Reader, NEW_ADDR, NULL_ADDR};
use crate::superblock::BLOCK_SIZE;
use crate::xattr::{decode_xattr, XattrEntry};
use anyhow::{anyhow, ensure, Error};
use bitflags::bitflags;
use std::collections::HashMap;
use std::fmt::Debug;
use storage_device::buffer::Buffer;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref, Unaligned};

/// Maximum filename length, in bytes.
const NAME_MAX: usize = 255;
/// Number of direct block address slots (i_addr) in the inode block itself.
const INODE_BLOCK_MAX_ADDR: usize = 923;
/// Number of address slots in each direct/indirect node block.
const ADDR_BLOCK_NUM_ADDR: u32 = 1018;

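/// A run of contiguous data blocks cached in the inode: `len` blocks of file data starting
/// at logical block `file_offset`, stored on disk starting at `block_address`.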
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
pub struct Extent {
    file_offset: u32,
    block_address: u32,
    len: u32,
}

/// The subset of the POSIX mode bits that this reader cares about.
#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
pub struct Mode(u16);
bitflags! {
    impl Mode: u16 {
        const RegularFile = 0o100000;
        const Directory = 0o040000;
    }
}

/// File advise flags (i_advise).
#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
pub struct AdviseFlags(u8);
bitflags! {
    impl AdviseFlags: u8 {
        const Encrypted = 0x04;
        const EncryptedName = 0x08;
        const Verity = 0x40;
    }
}

/// Inline flags (i_inline) describing what is stored inside the inode block itself.
#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
pub struct InlineFlags(u8);
bitflags! {
    impl InlineFlags: u8 {
        const Xattr = 0b00000001;
        const Data = 0b00000010;
        const Dentry = 0b00000100;
        const ExtraAttr = 0b00100000;
    }
}

/// Inode flags (i_flags).
#[derive(Copy, Clone, Debug, Immutable, FromBytes, IntoBytes)]
pub struct Flags(u32);
bitflags! {
    impl Flags: u32 {
        const Casefold = 0x40000000;
    }
}

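/// Fixed-size head of the on-disk inode block. The i_addr array, the five nids, and the
/// node footer that follow it on disk are parsed separately in `Inode::try_load`.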
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
pub struct InodeHeader {
    pub mode: Mode,
    pub advise_flags: AdviseFlags,
    pub inline_flags: InlineFlags,
    pub uid: u32,
    pub gid: u32,
    pub links: u32,
    pub size: u64,
    pub block_size: u64,
    pub atime: u64,
    pub ctime: u64,
    pub mtime: u64,
    pub atime_nanos: u32,
    pub ctime_nanos: u32,
    pub mtime_nanos: u32,
    pub generation: u32,
    pub dir_depth: u32,
    pub xattr_nid: u32,
    pub flags: Flags,
    pub parent_inode: u32,
    pub name_len: u32,
    pub name: [u8; NAME_MAX],
    pub dir_level: u8,

    ext: Extent,
}

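/// Optional extra attribute region that immediately follows the header when
/// `InlineFlags::ExtraAttr` is set. `extra_size` gives its total on-disk length.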
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
pub struct InodeExtraAttr {
    pub extra_size: u16,
    pub inline_xattr_size: u16,
    pub project_id: u32,
    pub inode_checksum: u32,
    pub creation_time: u64,
    pub creation_time_nanos: u32,
    pub compressed_blocks: u64,
    pub compression_algorithm: u8,
    pub log_cluster_size: u8,
    pub compression_flags: u16,
}

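/// Footer found at the end of every node block (inode, direct, and indirect nodes).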
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
pub struct InodeFooter {
    pub nid: u32,
    pub ino: u32,
    pub flag: u32,
    pub cp_ver: u64,
    pub next_blkaddr: u32,
}

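/// In-memory representation of a fully loaded inode: header, optional extra attributes,
/// any inline payload, and the complete tree of node pages needed to map logical file
/// offsets to physical block addresses without further device reads.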
pub struct Inode {
    pub header: InodeHeader,
    pub extra: Option<InodeExtraAttr>,
    pub inline_data: Option<Box<[u8]>>,
    pub(super) inline_dentry: Option<InlineDentry>,
    pub(super) i_addrs: Vec<u32>,
    nids: [u32; 5],
    pub footer: InodeFooter,

    /// Every direct/indirect node page referenced from `nids`, keyed by nid.
    nid_pages: HashMap<u32, Box<RawAddrBlock>>,
    pub xattr: Vec<XattrEntry>,

    /// Encryption context recovered from the xattrs, if the file is encrypted.
    pub context: Option<fscrypt::Context>,

    /// Physical addresses of all node blocks backing this inode.
    pub block_addrs: Vec<u32>,
}

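/// On-disk layout of a direct or indirect node block: an array of block (or nid) addresses
/// padded out to `BLOCK_SIZE`, followed by the node footer.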
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Immutable, KnownLayout, FromBytes, IntoBytes, Unaligned)]
pub struct RawAddrBlock {
    pub addrs: [u32; ADDR_BLOCK_NUM_ADDR as usize],
    _reserved:
        [u8; BLOCK_SIZE - std::mem::size_of::<InodeFooter>() - 4 * ADDR_BLOCK_NUM_ADDR as usize],
    pub footer: InodeFooter,
}

impl TryFrom<Buffer<'_>> for Box<RawAddrBlock> {
    type Error = Error;
    fn try_from(block: Buffer<'_>) -> Result<Self, Self::Error> {
        Ok(Box::new(
            RawAddrBlock::read_from_bytes(block.as_slice())
                .map_err(|_| anyhow!("RawAddrBlock read failed"))?,
        ))
    }
}

impl Debug for Inode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut out = f.debug_struct("Inode");
        out.field("header", &self.header);
        if let Some(extra) = &self.extra {
            out.field("extra", &extra);
        }
        if let Some(inline_dentry) = &self.inline_dentry {
            out.field("inline_dentry", &inline_dentry);
        }
        out.field("i_addrs", &self.i_addrs).field("footer", &self.footer);
        out.field("xattr", &self.xattr);
        out.finish()
    }
}

impl Inode {
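    /// Loads and parses inode `ino`: the header, footer, optional extra attributes, any
    /// inline payload, the xattrs (and encryption context), and every direct/indirect node
    /// page reachable from the inode's nids.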
    pub(super) async fn try_load(f2fs: &impl Reader, ino: u32) -> Result<Box<Inode>, Error> {
        let mut block_addrs = vec![];
        let mut raw_xattr = vec![];
        let mut this = {
            let block = f2fs.read_node(ino).await?;
            block_addrs.push(f2fs.get_nat_entry(ino).await?.block_addr);
            let (header, rest): (Ref<_, InodeHeader>, _) =
                Ref::from_prefix(block.as_slice()).unwrap();
            let (rest, footer): (_, Ref<_, InodeFooter>) = Ref::from_suffix(rest).unwrap();
            ensure!(footer.ino == ino, "Footer ino doesn't match.");

            // The five nids occupy the last 20 bytes before the footer.
            let mut nids = [0u32; 5];
            nids.as_mut_bytes()
                .copy_from_slice(&rest[INODE_BLOCK_MAX_ADDR * 4..(INODE_BLOCK_MAX_ADDR + 5) * 4]);
            let rest = &rest[..INODE_BLOCK_MAX_ADDR * 4];

            let (extra, rest) = if header.inline_flags.contains(InlineFlags::ExtraAttr) {
                let (extra, _): (Ref<_, InodeExtraAttr>, _) = Ref::from_prefix(rest).unwrap();
                let extra_size = extra.extra_size as usize;
                ensure!(extra_size <= rest.len(), "Bad extra_size in inode");
                (Some((*extra).clone()), &rest[extra_size..])
            } else {
                (None, rest)
            };
            let rest = if header.inline_flags.contains(InlineFlags::Xattr) {
                // Inline xattrs occupy the last 200 bytes of the i_addr area.
                ensure!(
                    rest.len() >= 200,
                    "Insufficient space for inline xattr. Likely bad extra_size."
                );
                raw_xattr.extend_from_slice(&rest[rest.len() - 200..]);
                &rest[..rest.len() - 200]
            } else {
                rest
            };

            let mut inline_data = None;
            let mut inline_dentry = None;
            let mut i_addrs: Vec<u32> = Vec::new();

            if header.inline_flags.contains(InlineFlags::Data) {
                ensure!(header.size as usize + 4 < rest.len(), "Invalid or corrupt inode.");
                // Inline data starts after the first 4-byte (reserved) address slot.
                inline_data = Some(rest[4..4 + header.size as usize].to_vec().into_boxed_slice());
            } else if header.inline_flags.contains(InlineFlags::Dentry) {
                inline_dentry = Some(InlineDentry::try_from_bytes(rest)?);
            } else {
                i_addrs.resize(rest.len() / 4, 0);
                i_addrs.as_mut_bytes().copy_from_slice(&rest[..rest.len() / 4 * 4]);
            };

            Box::new(Self {
                header: (*header).clone(),
                extra,
                inline_data: inline_data.map(|x| x.into()),
                inline_dentry,
                i_addrs,
                nids,
                footer: (*footer).clone(),

                nid_pages: HashMap::new(),
                xattr: vec![],
                context: None,

                block_addrs,
            })
        };

        // Xattrs can spill out of the inline area into a dedicated xattr node.
        if this.header.xattr_nid != 0 {
            raw_xattr.extend_from_slice(f2fs.read_node(this.header.xattr_nid).await?.as_slice());
            this.block_addrs.push(f2fs.get_nat_entry(this.header.xattr_nid).await?.block_addr);
        }
        this.xattr = decode_xattr(&raw_xattr)?;

        this.context = crypto::try_read_context_from_xattr(&this.xattr)?;

        // Walk the nid tree: nids[0..2] are direct node blocks, nids[2..4] are
        // single-indirect, and nids[4] is double-indirect. Cache every node page.
        for (i, nid) in this.nids.into_iter().enumerate() {
            if nid == NULL_ADDR {
                continue;
            }
            match i {
                0..2 => {
                    this.nid_pages.insert(nid, f2fs.read_node(nid).await?.try_into()?);
                    this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
                }
                2..4 => {
                    let indirect = Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
                    this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
                    for nid in indirect.addrs {
                        if nid != NULL_ADDR {
                            this.nid_pages.insert(nid, f2fs.read_node(nid).await?.try_into()?);
                            this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
                        }
                    }
                    this.nid_pages.insert(nid, indirect);
                }
                4 => {
                    let double_indirect =
                        Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
                    this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
                    for nid in double_indirect.addrs {
                        if nid != NULL_ADDR {
                            let indirect =
                                Box::<RawAddrBlock>::try_from(f2fs.read_node(nid).await?)?;
                            this.block_addrs.push(f2fs.get_nat_entry(nid).await?.block_addr);
                            for nid in indirect.addrs {
                                if nid != NULL_ADDR {
                                    this.nid_pages
                                        .insert(nid, f2fs.read_node(nid).await?.try_into()?);
                                    this.block_addrs
                                        .push(f2fs.get_nat_entry(nid).await?.block_addr);
                                }
                            }
                            this.nid_pages.insert(nid, indirect);
                        }
                    }
                    this.nid_pages.insert(nid, double_indirect);
                }
                _ => unreachable!(),
            }
        }

        Ok(this)
    }

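    /// Returns an iterator over this inode's allocated data blocks, yielding
    /// `(logical_block_num, physical_block_addr)` pairs in logical order and skipping holes.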
    pub fn data_blocks(&self) -> DataBlocksIter<'_> {
        DataBlocksIter { inode: self, stage: 0, offset: 0, a: 0, b: 0, c: 0 }
    }

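    /// Resolves one logical block number to its physical block address, returning
    /// `NULL_ADDR` if the block is unallocated. Logical offsets map first onto `i_addrs`,
    /// then onto the two direct node blocks, the two single-indirect trees, and finally
    /// the double-indirect tree.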
    pub fn data_block_addr(&self, mut block_num: u32) -> u32 {
        let offset = block_num;

        if block_num < self.i_addrs.len() as u32 {
            return self.i_addrs[block_num as usize];
        }
        block_num -= self.i_addrs.len() as u32;

        // Logical extents covered by each nid, relative to the end of i_addrs.
        const NID0_END: u32 = ADDR_BLOCK_NUM_ADDR;
        const NID1_END: u32 = NID0_END + ADDR_BLOCK_NUM_ADDR;
        const NID2_END: u32 = NID1_END + ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
        const NID3_END: u32 = NID2_END + ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;

        // Position an iterator exactly at the target block, then take one step: if the
        // first entry yielded isn't the target, the block is a hole.
        let mut iter = match block_num {
            ..NID0_END => {
                let a = block_num;
                DataBlocksIter { inode: self, stage: 1, offset, a, b: 0, c: 0 }
            }
            ..NID1_END => {
                let a = block_num - NID0_END;
                DataBlocksIter { inode: self, stage: 2, offset, a, b: 0, c: 0 }
            }
            ..NID2_END => {
                block_num -= NID1_END;
                let a = block_num / ADDR_BLOCK_NUM_ADDR;
                let b = block_num % ADDR_BLOCK_NUM_ADDR;
                DataBlocksIter { inode: self, stage: 3, offset, a, b, c: 0 }
            }
            ..NID3_END => {
                block_num -= NID2_END;
                let a = block_num / ADDR_BLOCK_NUM_ADDR;
                let b = block_num % ADDR_BLOCK_NUM_ADDR;
                DataBlocksIter { inode: self, stage: 4, offset, a, b, c: 0 }
            }
            _ => {
                block_num -= NID3_END;
                let a = block_num / ADDR_BLOCK_NUM_ADDR / ADDR_BLOCK_NUM_ADDR;
                let b = (block_num / ADDR_BLOCK_NUM_ADDR) % ADDR_BLOCK_NUM_ADDR;
                let c = block_num % ADDR_BLOCK_NUM_ADDR;
                DataBlocksIter { inode: self, stage: 5, offset, a, b, c }
            }
        };
        if let Some((logical, physical)) = iter.next() {
            if logical == offset {
                physical
            } else {
                NULL_ADDR
            }
        } else {
            NULL_ADDR
        }
    }
}

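/// Iterator over an inode's allocated data blocks, yielding `(logical, physical)` pairs.
/// `stage` tracks which level of the block tree is being scanned (0 = i_addrs, 1-2 =
/// direct node blocks, 3-4 = single-indirect, 5 = double-indirect); `a`, `b` and `c` index
/// into the node pages at each level, and `offset` is the next logical block number.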
pub struct DataBlocksIter<'a> {
    inode: &'a Inode,
    stage: u32,
    offset: u32,
    a: u32,
    b: u32,
    c: u32,
}

impl Iterator for DataBlocksIter<'_> {
    type Item = (u32, u32);
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.stage {
                // Direct addresses held in the inode block itself.
                0 => {
                    while let Some(&addr) = self.inode.i_addrs.get(self.a as usize) {
                        self.a += 1;
                        self.offset += 1;
                        if addr != NULL_ADDR && addr != NEW_ADDR {
                            return Some((self.offset - 1, addr));
                        }
                    }
                    self.stage += 1;
                    self.a = 0;
                }
                // Direct node blocks (nids[0] and nids[1]).
                1..3 => {
                    let nid = self.inode.nids[self.stage as usize - 1];

                    if nid == NULL_ADDR || nid == NEW_ADDR {
                        self.stage += 1;
                        self.offset += ADDR_BLOCK_NUM_ADDR;
                    } else {
                        let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                        while let Some(&addr) = addrs.get(self.a as usize) {
                            self.a += 1;
                            self.offset += 1;
                            if addr != NULL_ADDR && addr != NEW_ADDR {
                                return Some((self.offset - 1, addr));
                            }
                        }
                        self.stage += 1;
                        self.a = 0;
                    }
                }

                // Single-indirect node blocks (nids[2] and nids[3]).
                3..5 => {
                    let nid = self.inode.nids[self.stage as usize - 1];
                    if nid == NULL_ADDR || nid == NEW_ADDR {
                        self.stage += 1;
                        self.offset += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
                    } else {
                        let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                        while let Some(&nid) = addrs.get(self.a as usize) {
                            if nid == NULL_ADDR || nid == NEW_ADDR {
                                self.a += 1;
                                self.offset += ADDR_BLOCK_NUM_ADDR;
                            } else {
                                let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                                while let Some(&addr) = addrs.get(self.b as usize) {
                                    self.b += 1;
                                    self.offset += 1;
                                    if addr != NULL_ADDR && addr != NEW_ADDR {
                                        return Some((self.offset - 1, addr));
                                    }
                                }
                                self.a += 1;
                                self.b = 0;
                            }
                        }
                        self.stage += 1;
                        self.a = 0;
                    }
                }

                // The double-indirect node block (nids[4]).
                5 => {
                    let nid = self.inode.nids[4];
                    if nid != NULL_ADDR && nid != NEW_ADDR {
                        let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                        while let Some(&nid) = addrs.get(self.a as usize) {
                            if nid == NULL_ADDR || nid == NEW_ADDR {
                                self.a += 1;
                                self.offset += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
                            } else {
                                let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                                while let Some(&nid) = addrs.get(self.b as usize) {
                                    if nid == NULL_ADDR || nid == NEW_ADDR {
                                        self.b += 1;
                                        self.offset += ADDR_BLOCK_NUM_ADDR;
                                    } else {
                                        let addrs = self.inode.nid_pages.get(&nid).unwrap().addrs;
                                        while let Some(&addr) = addrs.get(self.c as usize) {
                                            self.c += 1;
                                            self.offset += 1;
                                            if addr != NULL_ADDR && addr != NEW_ADDR {
                                                return Some((self.offset - 1, addr));
                                            }
                                        }
                                        self.b += 1;
                                        self.c = 0;
                                    }
                                }

                                self.a += 1;
                                self.b = 0;
                            }
                        }
                    }
                    self.stage += 1;
                }
                _ => {
                    break;
                }
            }
        }
        None
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::nat::RawNatEntry;
    use crate::reader;
    use anyhow;
    use async_trait::async_trait;
    use storage_device::buffer_allocator::{BufferAllocator, BufferSource};
    use zerocopy::FromZeros;

    /// A `Reader` that serves raw blocks and node pages from in-memory maps.
    struct FakeReader {
        data: HashMap<u32, Box<[u8; 4096]>>,
        nids: HashMap<u32, Box<[u8; 4096]>>,
        allocator: BufferAllocator,
    }

    #[async_trait]
    impl reader::Reader for FakeReader {
        async fn read_raw_block(&self, block_addr: u32) -> Result<Buffer<'_>, Error> {
            match self.data.get(&block_addr) {
                None => Err(anyhow!("unexpected block {block_addr}")),
                Some(value) => {
                    let mut block = self.allocator.allocate_buffer(BLOCK_SIZE).await;
                    block.as_mut_slice().copy_from_slice(value.as_ref());
                    Ok(block)
                }
            }
        }

        async fn read_node(&self, nid: u32) -> Result<Buffer<'_>, Error> {
            match self.nids.get(&nid) {
                None => Err(anyhow!("unexpected nid {nid}")),
                Some(value) => {
                    let mut block = self.allocator.allocate_buffer(BLOCK_SIZE).await;
                    block.as_mut_slice().copy_from_slice(value.as_ref());
                    Ok(block)
                }
            }
        }

        fn fs_uuid(&self) -> &[u8; 16] {
            &[0; 16]
        }

        async fn get_nat_entry(&self, nid: u32) -> Result<RawNatEntry, Error> {
            Ok(RawNatEntry { ino: nid, block_addr: 0, ..Default::default() })
        }
    }

    /// Builds a minimal inode block with extra attributes and inline xattrs enabled.
    fn build_inode(ino: u32) -> Box<[u8; BLOCK_SIZE]> {
        let mut header = InodeHeader::new_zeroed();
        let mut footer = InodeFooter::new_zeroed();
        let mut extra = InodeExtraAttr::new_zeroed();

        extra.extra_size = std::mem::size_of::<InodeExtraAttr>().try_into().unwrap();

        header.mode = Mode::RegularFile;
        header.inline_flags.set(InlineFlags::ExtraAttr, true);
        header.inline_flags.set(InlineFlags::Xattr, true);
        footer.ino = ino;

        let mut out = [0u8; BLOCK_SIZE];
        out[..std::mem::size_of::<InodeHeader>()].copy_from_slice(header.as_bytes());
        out[std::mem::size_of::<InodeHeader>()
            ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
            .copy_from_slice(extra.as_bytes());
        out[BLOCK_SIZE - std::mem::size_of::<InodeFooter>()..].copy_from_slice(footer.as_bytes());
        Box::new(out)
    }

    #[fuchsia::test]
    async fn test_xattr_bounds() {
        let mut reader = FakeReader {
            data: [].into(),
            nids: [(1, build_inode(1)), (2, [0u8; 4096].into()), (3, [0u8; 4096].into())].into(),
            allocator: BufferAllocator::new(BLOCK_SIZE, BufferSource::new(BLOCK_SIZE * 10)),
        };
        assert!(Inode::try_load(&reader, 1).await.is_ok());

        let header_len = std::mem::size_of::<InodeHeader>();
        let footer_len = std::mem::size_of::<InodeFooter>();
        let nids_len = std::mem::size_of::<u32>() * 5;
        let overheads = header_len + footer_len + nids_len;

        // Largest extra_size that still leaves the 200 bytes needed for inline xattrs.
        let mut extra = InodeExtraAttr::new_zeroed();
        extra.extra_size = (BLOCK_SIZE - overheads - 200) as u16;
        reader.nids.get_mut(&1).unwrap()[std::mem::size_of::<InodeHeader>()
            ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
            .copy_from_slice(extra.as_bytes());
        assert!(Inode::try_load(&reader, 1).await.is_ok());

        // One byte larger and the inline xattr region no longer fits.
        let mut extra = InodeExtraAttr::new_zeroed();
        extra.extra_size = (BLOCK_SIZE - overheads - 199) as u16;
        reader.nids.get_mut(&1).unwrap()[std::mem::size_of::<InodeHeader>()
            ..std::mem::size_of::<InodeHeader>() + std::mem::size_of::<InodeExtraAttr>()]
            .copy_from_slice(extra.as_bytes());
        assert!(Inode::try_load(&reader, 1).await.is_err());
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use zerocopy::FromZeros;

    /// Builds an address block whose final slot maps to `addr`.
    fn last_addr_block(addr: u32) -> Box<RawAddrBlock> {
        let mut addr_block = RawAddrBlock::new_zeroed();
        addr_block.addrs[ADDR_BLOCK_NUM_ADDR as usize - 1] = addr;
        Box::new(addr_block)
    }

    /// Walks data_blocks() and data_block_addr() across every level of the block tree;
    /// each node page used here maps only its final address slot.
    #[test]
    fn test_data_iter() {
        let header = InodeHeader::new_zeroed();
        let footer = InodeFooter::new_zeroed();
        let mut nids = [0u32; 5];
        let mut nid_pages = HashMap::new();

        let mut i_addrs: Vec<u32> = Vec::new();
        i_addrs.resize(INODE_BLOCK_MAX_ADDR, 0);
        i_addrs[INODE_BLOCK_MAX_ADDR - 1] = 1000;

        nids[0] = 101;
        nid_pages.insert(101, last_addr_block(1001));

        nids[1] = 102;
        nid_pages.insert(102, last_addr_block(1002));

        nids[2] = 103;
        nid_pages.insert(103, last_addr_block(104));
        nid_pages.insert(104, last_addr_block(1003));

        nids[3] = 105;
        nid_pages.insert(105, last_addr_block(106));
        nid_pages.insert(106, last_addr_block(1004));

        nids[4] = 107;
        nid_pages.insert(107, last_addr_block(108));
        nid_pages.insert(108, last_addr_block(109));
        nid_pages.insert(109, last_addr_block(1005));

        let inode = Box::new(Inode {
            header,
            extra: None,
            inline_data: None,
            inline_dentry: None,
            i_addrs,
            nids,
            footer,

            nid_pages,
            xattr: vec![],
            context: None,

            block_addrs: vec![],
        });

        // Block 0 is a hole.
        assert_eq!(inode.data_block_addr(0), 0);

        let mut iter = inode.data_blocks();
        let mut block_num = 922; // INODE_BLOCK_MAX_ADDR - 1, the last direct slot.
        assert_eq!(iter.next(), Some((block_num, 1000)));
        assert_eq!(inode.data_block_addr(block_num), 1000);
        block_num += ADDR_BLOCK_NUM_ADDR;
        assert_eq!(iter.next(), Some((block_num, 1001)));
        assert_eq!(inode.data_block_addr(block_num), 1001);
        block_num += ADDR_BLOCK_NUM_ADDR;
        assert_eq!(iter.next(), Some((block_num, 1002)));
        assert_eq!(inode.data_block_addr(block_num), 1002);
        block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
        assert_eq!(iter.next(), Some((block_num, 1003)));
        assert_eq!(inode.data_block_addr(block_num), 1003);
        block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
        assert_eq!(iter.next(), Some((block_num, 1004)));
        assert_eq!(inode.data_block_addr(block_num), 1004);
        block_num += ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR * ADDR_BLOCK_NUM_ADDR;
        assert_eq!(iter.next(), Some((block_num, 1005)));
        assert_eq!(inode.data_block_addr(block_num), 1005);
        assert_eq!(iter.next(), None);
        assert_eq!(inode.data_block_addr(block_num - 1), 0);
        assert_eq!(inode.data_block_addr(block_num + 1), 0);
    }
}