fsverity_merkle/
util.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::{SHA256_SALT_PADDING, SHA512_SALT_PADDING};
6use anyhow::{Error, anyhow, ensure};
7use fidl_fuchsia_io as fio;
8use mundane::hash::{Digest, Hasher, Sha256, Sha512};
9use std::fmt;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// `FsVerityHasherOptions` contains relevant metadata for the FsVerityHasher. The `salt` is set
/// according to the FsverityMetadata struct stored in fxfs and `block_size` is that of the
/// filesystem.
#[derive(Clone)]
pub struct FsVerityHasherOptions {
    // Salt prepended to every hashed block; may be empty.
    salt: Vec<u8>,
    // Merkle block size in bytes.
    block_size: usize,
    // When true, the salt is zero-padded to the algorithm's padding size before hashing
    // (fsverity behavior); when false the salt is hashed unpadded (dmverity behavior).
    // See `FsVerityHasher::hash_block` / `hash_hashes`.
    fsverity: bool,
}
21
22impl FsVerityHasherOptions {
23    pub fn new(salt: Vec<u8>, block_size: usize) -> Self {
24        FsVerityHasherOptions { salt, block_size, fsverity: true }
25    }
26
27    pub fn new_dmverity(salt: Vec<u8>, block_size: usize) -> Self {
28        FsVerityHasherOptions { salt, block_size, fsverity: false }
29    }
30}
31
/// Raw on-disk layout of an fsverity descriptor (256 bytes total). Every field is a
/// byte or byte array, so the packed struct has no alignment requirements and can be
/// read/written in place via the zerocopy derives.
#[derive(Debug, KnownLayout, FromBytes, Immutable, IntoBytes)]
#[repr(C, packed)]
pub struct FsVerityDescriptorRaw {
    // Format version; only 1 is supported (validated in `FsVerityDescriptor::from_bytes`).
    version: u8,
    // Hash algorithm: 1 = SHA-256, 2 = SHA-512.
    algorithm: u8,
    // log2 of the merkle block size (e.g. 12 for 4096).
    block_size_log2: u8,
    // Number of meaningful bytes in `salt` (at most 32).
    salt_size: u8,
    _reserved_1: [u8; 4],
    // File size in bytes, little-endian (see `new`, which uses `to_le_bytes`).
    file_size: [u8; 8],
    // Root digest, zero-padded; only the first digest-length bytes are meaningful.
    root_digest: [u8; 64],
    // Salt, zero-padded; only the first `salt_size` bytes are meaningful.
    salt: [u8; 32],
    _reserved_2: [u8; 144],
}
45
46impl FsVerityDescriptorRaw {
47    pub fn new(
48        algorithm: fio::HashAlgorithm,
49        block_size: u64,
50        file_size: u64,
51        root: &[u8],
52        salt: &[u8],
53    ) -> Result<Self, Error> {
54        ensure!(block_size.is_power_of_two() && block_size >= 1024, "Invalid merkle block size");
55        ensure!(salt.len() <= 32, "Salt too long");
56        let (hash_len, algorithm) = match algorithm {
57            fio::HashAlgorithm::Sha256 => (<Sha256 as Hasher>::Digest::DIGEST_LEN, 1),
58            fio::HashAlgorithm::Sha512 => (<Sha512 as Hasher>::Digest::DIGEST_LEN, 2),
59            _ => return Err(anyhow!("Unknown hash type")),
60        };
61        ensure!(root.len() == hash_len, "Wrong length of root digest");
62
63        let mut this = Self {
64            version: 1,
65            algorithm,
66            block_size_log2: block_size.trailing_zeros() as u8,
67            salt_size: salt.len() as u8,
68            _reserved_1: [0u8; 4],
69            file_size: file_size.to_le_bytes(),
70            root_digest: [0u8; 64],
71            salt: [0u8; 32],
72            _reserved_2: [0u8; 144],
73        };
74        this.root_digest.as_mut_slice()[0..hash_len].copy_from_slice(root);
75        this.salt.as_mut_slice()[0..salt.len()].copy_from_slice(salt);
76        Ok(this)
77    }
78
79    pub fn write_to_slice(&self, dest: &mut [u8]) -> Result<(), Error> {
80        self.write_to_prefix(dest).map_err(|_| anyhow!("Buffer too short"))
81    }
82}
83
/// A descriptor struct for fsverity. It does not own the bytes backing it.
#[derive(Debug)]
pub struct FsVerityDescriptor<'a> {
    // Parsed view of the raw descriptor located inside `bytes`.
    inner: &'a FsVerityDescriptorRaw,
    // The entire block-aligned fsverity data region (leaf digests followed by the
    // descriptor in the final block).
    bytes: &'a [u8],
}
90
impl<'a> FsVerityDescriptor<'a> {
    /// Create a descriptor from the raw bytes of the entire block-aligned fsverity data.
    pub fn from_bytes(bytes: &'a [u8], block_size: usize) -> Result<Self, Error> {
        ensure!(block_size.is_power_of_two() && block_size > 0, "Invalid block size.");
        // Descriptor is placed in the last block. Go to the start of the last block.
        let descriptor_offset = if bytes.len() == 0 {
            // This will fail properly below.
            0
        } else {
            ((bytes.len() - 1) / block_size) * block_size
        };
        // ref_from_prefix yields (reference, remaining-bytes); only the reference matters here.
        let inner = FsVerityDescriptorRaw::ref_from_prefix(&bytes[descriptor_offset..])
            .map_err(|_| anyhow!("Descriptor bytes too small"))?
            .0;

        ensure!(inner.version == 1, "Unsupported version {}", inner.version);

        // 1 = SHA-256, 2 = SHA-512. The `unreachable!` arms in the accessors below rely
        // on this validation.
        ensure!(
            inner.algorithm == 1 || inner.algorithm == 2,
            "Unsupported algorithm {}",
            inner.algorithm
        );

        // Merkle block size here doesn't necessarily need to match fs block size, but it is the
        // most efficient choice, greatly simplifies handling, and is the only supported choice in
        // the destination fxfs. It is stored in the descriptor as the log_2 of the value. It must
        // be at least 1024 and also no more than system page size. We won't verify page size here
        // but also won't support more than 64KiB.
        ensure!(
            inner.block_size_log2 >= 10 && inner.block_size_log2 <= 16,
            "Only supports 1KiB-64KiB"
        );

        // The raw descriptor's salt array is only 32 bytes long.
        ensure!(inner.salt_size <= 32, "Salt too big for struct");
        let this = Self { inner, bytes };
        ensure!(this.block_size() == block_size, "Only support same block size as file system");
        Ok(this)
    }

    /// Digest length in bytes for this descriptor's hash algorithm.
    pub fn digest_len(&self) -> usize {
        match self.inner.algorithm {
            1 => <Sha256 as Hasher>::Digest::DIGEST_LEN,
            2 => <Sha512 as Hasher>::Digest::DIGEST_LEN,
            _ => unreachable!("This should be verified at creation time."),
        }
    }

    /// This descriptor's hash algorithm as a FIDL enum value.
    pub fn digest_algorithm(&self) -> fio::HashAlgorithm {
        match self.inner.algorithm {
            1 => fio::HashAlgorithm::Sha256,
            2 => fio::HashAlgorithm::Sha512,
            _ => unreachable!("This should be verified at creation time."),
        }
    }

    /// Merkle block size in bytes, decoded from its log2 on-disk form.
    pub fn block_size(&self) -> usize {
        1usize << self.inner.block_size_log2
    }

    /// Size in bytes of the file this descriptor covers.
    // NOTE(review): the `as usize` cast truncates on 32-bit targets for files >4GiB —
    // confirm targets are 64-bit or that file sizes are bounded elsewhere.
    pub fn file_size(&self) -> usize {
        u64::from_le_bytes(self.inner.file_size) as usize
    }

    /// The merkle root digest (the first `digest_len()` bytes of the raw field).
    pub fn root_digest(&self) -> &'a [u8] {
        &self.inner.root_digest[..self.digest_len()]
    }

    /// The salt (the first `salt_size` bytes of the raw field).
    pub fn salt(&self) -> &'a [u8] {
        &self.inner.salt[..self.inner.salt_size as usize]
    }

    /// Return a hasher configured based on this descriptor.
    pub fn hasher(&self) -> FsVerityHasher {
        match self.inner.algorithm {
            1 => FsVerityHasher::Sha256(FsVerityHasherOptions::new(
                self.salt().to_vec(),
                self.block_size(),
            )),
            2 => FsVerityHasher::Sha512(FsVerityHasherOptions::new(
                self.salt().to_vec(),
                self.block_size(),
            )),
            _ => unreachable!("This should be verified at creation time."),
        }
    }

    /// A slice of all the leaf digests required for the file.
    pub fn leaf_digests(&self) -> Result<&'a [u8], Error> {
        let block_size = self.block_size();
        Ok(match self.file_size().div_ceil(block_size) {
            // Empty file: there are no leaves at all.
            0 => [0u8; 0].as_slice(),
            // Single-block file: the root digest doubles as the only leaf.
            1 => self.root_digest(),
            file_blocks => {
                // Leaves occupy the block-aligned region immediately preceding the
                // descriptor's block.
                let leaf_size = file_blocks * self.digest_len();
                let layer_size = leaf_size.next_multiple_of(block_size);
                let descriptor_offset = ((self.bytes.len() - 1) / block_size) * block_size;
                ensure!(descriptor_offset >= layer_size, "No space for leaves in descriptor");
                let leaf_offset = descriptor_offset - layer_size;
                &self.bytes[leaf_offset..(leaf_offset + leaf_size)]
            }
        })
    }
}
194
/// `FsVerityHasher` is used by fsverity to construct merkle trees for verity-enabled files.
/// `FsVerityHasher` is parameterized by a salt and a block size.
#[derive(Clone)]
pub enum FsVerityHasher {
    /// SHA-256 digests (32 bytes each).
    Sha256(FsVerityHasherOptions),
    /// SHA-512 digests (64 bytes each).
    Sha512(FsVerityHasherOptions),
}
202
203impl fmt::Debug for FsVerityHasher {
204    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
205        match self {
206            FsVerityHasher::Sha256(metadata) => f
207                .debug_struct("FsVerityHasher::Sha256")
208                .field("salt", &metadata.salt)
209                .field("block_size", &metadata.block_size)
210                .finish(),
211            FsVerityHasher::Sha512(metadata) => f
212                .debug_struct("FsVerityHasher::Sha512")
213                .field("salt", &metadata.salt)
214                .field("block_size", &metadata.block_size)
215                .finish(),
216        }
217    }
218}
219
impl FsVerityHasher {
    /// The merkle block size in bytes this hasher is configured with.
    pub fn block_size(&self) -> usize {
        match self {
            FsVerityHasher::Sha256(metadata) => metadata.block_size,
            FsVerityHasher::Sha512(metadata) => metadata.block_size,
        }
    }

    /// The digest length in bytes (32 for SHA-256, 64 for SHA-512).
    pub fn hash_size(&self) -> usize {
        match self {
            FsVerityHasher::Sha256(_) => <Sha256 as Hasher>::Digest::DIGEST_LEN,
            FsVerityHasher::Sha512(_) => <Sha512 as Hasher>::Digest::DIGEST_LEN,
        }
    }

    /// True when configured for fsverity-style hashing (salt zero-padded to the
    /// algorithm's padding size before hashing); false for dmverity-style (unpadded).
    pub fn fsverity(&self) -> bool {
        match &self {
            FsVerityHasher::Sha256(metadata) => metadata.fsverity,
            FsVerityHasher::Sha512(metadata) => metadata.fsverity,
        }
    }

    /// Computes the MerkleTree digest from a `block` of data.
    ///
    /// A MerkleTree digest is a hash of a block of data. The block will be zero filled if its
    /// len is less than the block_size, except for when the first data block is completely empty.
    /// If `salt.len() > 0`, we prepend the block with the salt which itself is zero filled up
    /// to the padding.
    ///
    /// # Panics
    ///
    /// Panics if `block.len()` exceeds `self.block_size()`.
    pub fn hash_block(&self, block: &[u8]) -> Vec<u8> {
        match self {
            FsVerityHasher::Sha256(metadata) => {
                if block.is_empty() {
                    // Empty files have a root hash of all zeroes.
                    return vec![0; <Sha256 as Hasher>::Digest::DIGEST_LEN];
                }
                assert!(block.len() <= metadata.block_size);
                let mut hasher = Sha256::default();
                // NOTE(review): cast assumes salts never exceed 255 bytes; the descriptor
                // code elsewhere in this file bounds salts to 32 bytes — confirm all
                // callers of FsVerityHasherOptions::new respect that bound.
                let salt_size = metadata.salt.len() as u8;

                if salt_size > 0 {
                    hasher.update(&metadata.salt);
                    // fsverity zero-pads the salt up to the next multiple of the padding
                    // constant; dmverity hashes the salt as-is.
                    if metadata.fsverity && salt_size % SHA256_SALT_PADDING != 0 {
                        hasher.update(&vec![
                            0;
                            (SHA256_SALT_PADDING - salt_size % SHA256_SALT_PADDING)
                                as usize
                        ])
                    }
                }

                hasher.update(block);
                // Zero fill block up to self.block_size(). As a special case, if the first data
                // block is completely empty, it is not zero filled.
                if block.len() != metadata.block_size {
                    hasher.update(&vec![0; metadata.block_size - block.len()]);
                }
                hasher.finish().bytes().to_vec()
            }
            FsVerityHasher::Sha512(metadata) => {
                // Mirrors the Sha256 arm above with SHA-512 parameters.
                if block.is_empty() {
                    // Empty files have a root hash of all zeroes.
                    return vec![0; <Sha512 as Hasher>::Digest::DIGEST_LEN];
                }
                assert!(block.len() <= metadata.block_size);
                let mut hasher = Sha512::default();
                let salt_size = metadata.salt.len() as u8;

                if salt_size > 0 {
                    hasher.update(&metadata.salt);
                    if metadata.fsverity && salt_size % SHA512_SALT_PADDING != 0 {
                        hasher.update(&vec![
                            0;
                            (SHA512_SALT_PADDING - salt_size % SHA512_SALT_PADDING)
                                as usize
                        ])
                    }
                }

                hasher.update(block);
                // Zero fill block up to self.block_size(). As a special case, if the first data
                // block is completely empty, it is not zero filled.
                if block.len() != metadata.block_size {
                    hasher.update(&vec![0; metadata.block_size - block.len()]);
                }
                hasher.finish().bytes().to_vec()
            }
        }
    }

    /// Computes a MerkleTree digest from a block of `hashes`.
    ///
    /// Like `hash_block`, `hash_hashes` zero fills incomplete buffers and prepends the digests
    /// with a salt, which is zero filled up to the padding.
    ///
    /// # Panics
    ///
    /// Panics if any of the following conditions are met:
    /// - `hashes.len()` is 0
    /// - `hashes.len() > self.block_size() / digest length`
    pub fn hash_hashes(&self, hashes: &[Vec<u8>]) -> Vec<u8> {
        assert_ne!(hashes.len(), 0);
        match self {
            FsVerityHasher::Sha256(metadata) => {
                assert!(
                    hashes.len() <= (metadata.block_size / <Sha256 as Hasher>::Digest::DIGEST_LEN)
                );
                let mut hasher = Sha256::default();
                let salt_size = metadata.salt.len() as u8;
                if salt_size > 0 {
                    hasher.update(&metadata.salt);
                    // Same salt padding rule as hash_block.
                    if metadata.fsverity && salt_size % SHA256_SALT_PADDING != 0 {
                        hasher.update(&vec![
                            0;
                            (SHA256_SALT_PADDING - salt_size % SHA256_SALT_PADDING)
                                as usize
                        ])
                    }
                }

                for hash in hashes {
                    hasher.update(hash.as_slice());
                }
                // Pad the block out with zero digests so a partially-filled hash block
                // digests the same as a fully zero-extended one.
                for _ in 0..((metadata.block_size / <Sha256 as Hasher>::Digest::DIGEST_LEN)
                    - hashes.len())
                {
                    hasher.update(&[0; <Sha256 as Hasher>::Digest::DIGEST_LEN]);
                }

                hasher.finish().bytes().to_vec()
            }
            FsVerityHasher::Sha512(metadata) => {
                // Mirrors the Sha256 arm above with SHA-512 parameters.
                assert!(
                    hashes.len() <= (metadata.block_size / <Sha512 as Hasher>::Digest::DIGEST_LEN)
                );

                let mut hasher = Sha512::default();
                let salt_size = metadata.salt.len() as u8;
                if salt_size > 0 {
                    hasher.update(&metadata.salt);
                    if metadata.fsverity && salt_size % SHA512_SALT_PADDING != 0 {
                        hasher.update(&vec![
                            0;
                            (SHA512_SALT_PADDING - salt_size % SHA512_SALT_PADDING)
                                as usize
                        ])
                    }
                }

                for hash in hashes {
                    hasher.update(hash.as_slice());
                }
                for _ in 0..((metadata.block_size / <Sha512 as Hasher>::Digest::DIGEST_LEN)
                    - hashes.len())
                {
                    hasher.update(&[0; <Sha512 as Hasher>::Digest::DIGEST_LEN]);
                }

                hasher.finish().bytes().to_vec()
            }
        }
    }
}
386
387#[cfg(test)]
388mod tests {
389    use super::*;
390    use crate::MerkleTreeBuilder;
391    use fidl_fuchsia_io as fio;
392    use hex::FromHex;
393    use test_case::test_case;
394
395    const BLOCK_SIZE: usize = 4096;
396
397    #[test]
398    fn test_hash_block_empty_sha256() {
399        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
400        let block = [];
401        let hash = hasher.hash_block(&block[..]);
402        assert_eq!(hash, [0; 32]);
403    }
404
405    #[test]
406    fn test_hash_block_empty_sha512() {
407        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
408        let block = [];
409        let hash = hasher.hash_block(&block[..]);
410        assert_eq!(hash, [0; 64]);
411    }
412
413    #[test]
414    fn test_hash_block_partial_block_sha256() {
415        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
416        let block = vec![0xFF; hasher.block_size()];
417        let mut block2: Vec<u8> = vec![0xFF; hasher.block_size() / 2];
418        block2.append(&mut vec![0; hasher.block_size() / 2]);
419        let hash = hasher.hash_block(&block[..]);
420        let expected = hasher.hash_block(&block[..]);
421        assert_eq!(hash, expected);
422    }
423
424    #[test]
425    fn test_hash_block_partial_block_sha512() {
426        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
427        let block = vec![0xFF; hasher.block_size()];
428        let mut block2: Vec<u8> = vec![0xFF; hasher.block_size() / 2];
429        block2.append(&mut vec![0; hasher.block_size() / 2]);
430        let hash = hasher.hash_block(&block[..]);
431        let expected = hasher.hash_block(&block[..]);
432        assert_eq!(hash, expected);
433    }
434
435    #[test]
436    fn test_hash_block_single_sha256() {
437        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
438        let block = vec![0xFF; hasher.block_size()];
439        let hash = hasher.hash_block(&block[..]);
440        // Root hash of file size 4096 = block_size
441        let expected: [u8; 32] =
442            FromHex::from_hex("207f18729b037894447f948b81f63abe68007d0cd7c99a4ae0a3e323c52013a5")
443                .unwrap();
444        assert_eq!(hash, expected);
445    }
446
447    #[test]
448    fn test_hash_block_single_sha512() {
449        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
450        let block = vec![0xFF; hasher.block_size()];
451        let hash = hasher.hash_block(&block[..]);
452        // Root hash of file size 4096 = block_size
453        let expected: [u8; 64] = FromHex::from_hex("96d217a5f593384eb266b4bb2574b93c145ff1fd5ca89af52af6d4a14d2ce5200b2ddad30771c7cbcd139688e1a3847da7fd681490690adc945c3776154c42f6").unwrap();
454        assert_eq!(hash, expected);
455    }
456
457    #[test]
458    fn test_hash_hashes_full_block_sha256() {
459        let hasher = FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
460        let mut leafs = Vec::new();
461        {
462            let block = vec![0xFF; hasher.block_size()];
463            for _i in 0..hasher.block_size() / hasher.hash_size() {
464                leafs.push(hasher.hash_block(&block));
465            }
466        }
467        let root = hasher.hash_hashes(&leafs);
468        // Root hash of file size 524288 = block_size * (block_size / hash_size) = 4096 * (4096 / 32)
469        let expected: [u8; 32] =
470            FromHex::from_hex("827c28168aba953cf74706d4f3e776bd8892f6edf7b25d89645409f24108fb0b")
471                .unwrap();
472        assert_eq!(root, expected);
473    }
474
475    #[test]
476    fn test_hash_hashes_full_block_sha512() {
477        let hasher = FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096));
478        let mut leafs = Vec::new();
479        {
480            let block = vec![0xFF; hasher.block_size()];
481            for _i in 0..hasher.block_size() / hasher.hash_size() {
482                leafs.push(hasher.hash_block(&block));
483            }
484        }
485        let root = hasher.hash_hashes(&leafs);
486        // Root hash of file size 262144 = block_size * (block_size / hash_size) = 4096 * (4096 / 64)
487        let expected: [u8; 64] = FromHex::from_hex("17d1728518330e0d48951ba43908ea7ad73ea018597643aabba9af2e43dea70468ba54fa09f9c7d02b1c240bd8009d1abd49c05559815a3b73ce31c5c26f93ba").unwrap();
488        assert_eq!(root, expected);
489    }
490
491    #[test_case(FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xFF; 8], 4096)); "sha256")]
492    #[test_case(FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xFF; 8], 4096)); "sha512")]
493    fn test_hash_hashes_zero_pad_same_length(hasher: FsVerityHasher) {
494        let data_hash = hasher.hash_block(&vec![0xFF; hasher.block_size()]);
495        let mut zero_hash = Vec::with_capacity(hasher.hash_size());
496        zero_hash.extend(std::iter::repeat(0).take(hasher.hash_size()));
497        let hash_of_single_hash = hasher.hash_hashes(&[data_hash.clone()]);
498        let hash_of_single_hash_and_zero_hash = hasher.hash_hashes(&[data_hash, zero_hash]);
499        assert_eq!(hash_of_single_hash, hash_of_single_hash_and_zero_hash);
500    }
501
    #[test_case(vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE ; "test_exact_size")]
    #[test_case(vec![0u8; 256], 0 ; "test_exact_size_from_zero")]
    #[test_case(vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE ; "test_block_aligned")]
    #[test_case(vec![0u8; BLOCK_SIZE], 0 ; "test_block_aligned_from_zero")]
    #[test_case(vec![0u8; BLOCK_SIZE + 300], BLOCK_SIZE ; "test_trailing_space")]
    #[test_case(vec![0u8; 300], 0 ; "test_trailing_space_from_zero")]
    fn descriptor_read_write_locations(mut buf: Vec<u8>, descriptor_offset: usize) {
        // Writes a descriptor at the start of the buffer's last block, then checks that
        // from_bytes() locates it there on its own and round-trips every field.
        let salt = [4u8; 6];
        let root = [65u8; 32];
        let descriptor = FsVerityDescriptorRaw::new(
            fio::HashAlgorithm::Sha256,
            BLOCK_SIZE as u64,
            8192,
            root.as_slice(),
            salt.as_slice(),
        )
        .expect("Create raw descriptor");

        descriptor
            .write_to_slice(&mut buf.as_mut_slice()[descriptor_offset..])
            .expect("Writing to buf.");

        let descriptor2 = FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE)
            .expect("Parsing descriptor back");
        // Verify the raw values.
        assert_eq!(descriptor2.inner.version, descriptor.version);
        assert_eq!(descriptor2.inner.algorithm, descriptor.algorithm);
        assert_eq!(descriptor2.inner.block_size_log2, descriptor.block_size_log2);
        assert_eq!(descriptor2.inner.salt_size, descriptor.salt_size);
        assert_eq!(descriptor2.inner.file_size, descriptor.file_size);
        assert_eq!(descriptor2.inner.root_digest, descriptor.root_digest);
        assert_eq!(descriptor2.inner.salt, descriptor.salt);

        // Verify the processed values.
        assert_eq!(descriptor2.file_size(), 8192);
        assert_eq!(descriptor2.digest_len(), 32);
        assert_eq!(descriptor2.digest_algorithm(), fio::HashAlgorithm::Sha256);
        assert_eq!(descriptor2.root_digest(), root.as_slice());
        assert_eq!(descriptor2.salt(), salt.as_slice());
    }
542
543    #[test_case(2, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256")]
544    #[test_case(2, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512")]
545    // Enough blocks to have a second layer of merkle tree.
546    #[test_case(129, vec![0u8; BLOCK_SIZE * 3], BLOCK_SIZE * 2, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_big_file")]
547    #[test_case(129, vec![0u8; BLOCK_SIZE * 4], BLOCK_SIZE * 3, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_big_file")]
548    // Don't block align the end, just enough space for the descriptor.
549    #[test_case(2, vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_exact_fit")]
550    #[test_case(2, vec![0u8; BLOCK_SIZE + 256], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_exact_fit")]
551    // A really big merkle buffer, everything should still be at the end of it.
552    #[test_case(2, vec![0u8; BLOCK_SIZE * 100], BLOCK_SIZE * 99, BLOCK_SIZE * 98, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_big_buf")]
553    #[test_case(2, vec![0u8; BLOCK_SIZE * 100], BLOCK_SIZE * 99, BLOCK_SIZE * 98, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_big_buf")]
554    // File has only a single block. This is a special case for generating the leaf data.
555    #[test_case(1, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_one_block")]
556    #[test_case(1, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_one_block")]
557    // File has no data blocks. This is a special case for generating the leaf data.
558    #[test_case(0, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha256(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha256_empty_file")]
559    #[test_case(0, vec![0u8; BLOCK_SIZE * 2], BLOCK_SIZE, 0, FsVerityHasher::Sha512(FsVerityHasherOptions::new(vec![0xAB; 8], BLOCK_SIZE)); "sha512_empty_file")]
560    fn descriptor_merkle_leaves_locations(
561        file_blocks: usize,
562        mut buf: Vec<u8>,
563        descriptor_offset: usize,
564        leaf_offset: usize,
565        hasher: FsVerityHasher,
566    ) {
567        let mut file = vec![0u8; BLOCK_SIZE * file_blocks];
568        for i in 0..file_blocks {
569            let offset = i * BLOCK_SIZE;
570            file.as_mut_slice()[offset..(offset + BLOCK_SIZE)].fill(i as u8);
571        }
572
573        let (algorithm, salt) = match &hasher {
574            FsVerityHasher::Sha256(options) => (fio::HashAlgorithm::Sha256, options.salt.clone()),
575            FsVerityHasher::Sha512(options) => (fio::HashAlgorithm::Sha512, options.salt.clone()),
576        };
577
578        let hash_size = hasher.hash_size();
579        let mut builder = MerkleTreeBuilder::new(hasher);
580        builder.write(file.as_slice());
581        let tree = builder.finish();
582
583        let descriptor = FsVerityDescriptorRaw::new(
584            algorithm,
585            BLOCK_SIZE as u64,
586            file.len() as u64,
587            tree.root(),
588            salt.as_slice(),
589        )
590        .expect("Creating raw descriptor");
591
592        descriptor
593            .write_to_slice(&mut buf.as_mut_slice()[descriptor_offset..])
594            .expect("Writing descriptor");
595        // FsVerity doesn't actually write out the leaves if there is one or fewer blocks.
596        if file_blocks > 1 {
597            let leaf_bytes: Vec<u8> = tree.as_ref()[0].iter().flatten().copied().collect();
598            buf.as_mut_slice()[leaf_offset..(leaf_offset + (file_blocks * hash_size))]
599                .copy_from_slice(leaf_bytes.as_slice());
600        }
601
602        let descriptor2 =
603            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing decsriptor");
604        assert_eq!(descriptor2.root_digest(), tree.root());
605
606        let mut verifier_builder = MerkleTreeBuilder::new(descriptor2.hasher());
607        let leaves = descriptor2.leaf_digests().expect("Finding leaf digests");
608        for leaf in leaves.chunks_exact(hash_size) {
609            verifier_builder.push_data_hash(leaf.to_vec());
610        }
611
612        let verifier_tree = verifier_builder.finish();
613        assert_eq!(verifier_tree.root(), tree.root());
614    }
615
616    #[test]
617    fn test_raw_descriptor_failure_cases() {
618        // The base case is valid.
619        let descriptor = FsVerityDescriptorRaw::new(
620            fio::HashAlgorithm::Sha256,
621            BLOCK_SIZE as u64,
622            12,
623            &[0u8; 32],
624            &[0u8; 32],
625        )
626        .expect("Creating valid descriptor");
627        {
628            let mut buf = vec![0u8; 256];
629            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
630        }
631
632        // Try with buf too small.
633        {
634            let mut buf = vec![0u8; 200];
635            descriptor.write_to_slice(buf.as_mut_slice()).expect_err("Buffer too small");
636        }
637
638        // Block is too small or not power of two.
639        FsVerityDescriptorRaw::new(fio::HashAlgorithm::Sha256, 256, 12, &[0u8; 32], &[0u8; 32])
640            .expect_err("Bad block size");
641        FsVerityDescriptorRaw::new(
642            fio::HashAlgorithm::Sha256,
643            4097 as u64,
644            12,
645            &[0u8; 32],
646            &[0u8; 32],
647        )
648        .expect_err("Bad block size");
649
650        // Salt is too long.
651        FsVerityDescriptorRaw::new(
652            fio::HashAlgorithm::Sha256,
653            BLOCK_SIZE as u64,
654            12,
655            &[0u8; 32],
656            &[0u8; 33],
657        )
658        .expect_err("Bad salt");
659
660        // Hash length wrong at 33
661        FsVerityDescriptorRaw::new(
662            fio::HashAlgorithm::Sha256,
663            BLOCK_SIZE as u64,
664            12,
665            &[0u8; 33],
666            &[0u8; 32],
667        )
668        .expect_err("Bad hash length");
669        FsVerityDescriptorRaw::new(
670            fio::HashAlgorithm::Sha512,
671            BLOCK_SIZE as u64,
672            12,
673            &[0u8; 33],
674            &[0u8; 32],
675        )
676        .expect_err("Bad hash length");
677    }
678
    #[test]
    fn test_descriptor_buf_too_small_for_leaves() {
        // A 3000000-byte file spans 733 data blocks, so the leaf layer needs
        // 733 * 32 = 23456 bytes, rounded up to 6 blocks — far more than the single
        // block preceding the descriptor in this 2-block buffer, so leaf_digests()
        // must fail even though the descriptor itself parses fine.
        let raw_descriptor = FsVerityDescriptorRaw {
            version: 1,
            algorithm: 1,
            block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
            salt_size: 8,
            _reserved_1: [0u8; 4],
            file_size: 3000000u64.to_le_bytes(),
            root_digest: [0u8; 64],
            salt: [0u8; 32],
            _reserved_2: [0u8; 144],
        };
        let mut buf = vec![0u8; BLOCK_SIZE * 2];
        raw_descriptor.write_to_slice(&mut buf[BLOCK_SIZE..]).expect("Writing out descriptor");
        let descriptor =
            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing fine");
        descriptor.leaf_digests().expect_err("Not enough space for leaves");
    }
698
699    #[test]
700    fn test_descriptor_from_bytes_validation() {
701        // Base case, success.
702        {
703            let descriptor = FsVerityDescriptorRaw {
704                version: 1,
705                algorithm: 1,
706                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
707                salt_size: 8,
708                _reserved_1: [0u8; 4],
709                file_size: 25u64.to_le_bytes(),
710                root_digest: [0u8; 64],
711                salt: [0u8; 32],
712                _reserved_2: [0u8; 144],
713            };
714            let mut buf = vec![0u8; 256];
715            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
716            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect("Parsing fine");
717        }
718
719        // Buffer too small to parse.
720        {
721            let buf = vec![0u8; 200];
722            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Buff too small");
723        }
724
725        // Bad block sizes provided to method
726        {
727            let descriptor = FsVerityDescriptorRaw {
728                version: 1,
729                algorithm: 1,
730                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
731                salt_size: 8,
732                _reserved_1: [0u8; 4],
733                file_size: 25u64.to_le_bytes(),
734                root_digest: [0u8; 64],
735                salt: [0u8; 32],
736                _reserved_2: [0u8; 144],
737            };
738            let mut buf = vec![0u8; 256];
739            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
740            FsVerityDescriptor::from_bytes(buf.as_slice(), 4097)
741                .expect_err("Bad provided block size");
742        }
743        {
744            let descriptor = FsVerityDescriptorRaw {
745                version: 1,
746                algorithm: 1,
747                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
748                salt_size: 8,
749                _reserved_1: [0u8; 4],
750                file_size: 25u64.to_le_bytes(),
751                root_digest: [0u8; 64],
752                salt: [0u8; 32],
753                _reserved_2: [0u8; 144],
754            };
755            let mut buf = vec![0u8; 256];
756            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
757            FsVerityDescriptor::from_bytes(buf.as_slice(), 0).expect_err("Bad provided block size");
758        }
759
760        // Bad version
761        {
762            let descriptor = FsVerityDescriptorRaw {
763                version: 2,
764                algorithm: 1,
765                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
766                salt_size: 8,
767                _reserved_1: [0u8; 4],
768                file_size: 25u64.to_le_bytes(),
769                root_digest: [0u8; 64],
770                salt: [0u8; 32],
771                _reserved_2: [0u8; 144],
772            };
773            let mut buf = vec![0u8; 256];
774            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
775            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad version");
776        }
777
778        // Bad algorithm type.
779        {
780            let descriptor = FsVerityDescriptorRaw {
781                version: 1,
782                algorithm: 3,
783                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
784                salt_size: 8,
785                _reserved_1: [0u8; 4],
786                file_size: 25u64.to_le_bytes(),
787                root_digest: [0u8; 64],
788                salt: [0u8; 32],
789                _reserved_2: [0u8; 144],
790            };
791            let mut buf = vec![0u8; 256];
792            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
793            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad algorithm");
794        }
795
796        // Bad block size. Too small.
797        {
798            let descriptor = FsVerityDescriptorRaw {
799                version: 1,
800                algorithm: 1,
801                block_size_log2: 9,
802                salt_size: 8,
803                _reserved_1: [0u8; 4],
804                file_size: 25u64.to_le_bytes(),
805                root_digest: [0u8; 64],
806                salt: [0u8; 32],
807                _reserved_2: [0u8; 144],
808            };
809            let mut buf = vec![0u8; 256];
810            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
811            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad block size");
812        }
813
814        // Bad block size. Too big.
815        {
816            let descriptor = FsVerityDescriptorRaw {
817                version: 1,
818                algorithm: 1,
819                block_size_log2: 128,
820                salt_size: 8,
821                _reserved_1: [0u8; 4],
822                file_size: 25u64.to_le_bytes(),
823                root_digest: [0u8; 64],
824                salt: [0u8; 32],
825                _reserved_2: [0u8; 144],
826            };
827            let mut buf = vec![0u8; 256];
828            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
829            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad block size");
830        }
831
832        // Salt size too big.
833        {
834            let descriptor = FsVerityDescriptorRaw {
835                version: 1,
836                algorithm: 1,
837                block_size_log2: BLOCK_SIZE.trailing_zeros() as u8,
838                salt_size: 40,
839                _reserved_1: [0u8; 4],
840                file_size: 25u64.to_le_bytes(),
841                root_digest: [0u8; 64],
842                salt: [0u8; 32],
843                _reserved_2: [0u8; 144],
844            };
845            let mut buf = vec![0u8; 256];
846            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
847            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE).expect_err("Bad salt size");
848        }
849
850        // Block size doesn't match.
851        {
852            let descriptor = FsVerityDescriptorRaw {
853                version: 1,
854                algorithm: 1,
855                block_size_log2: 2048usize.trailing_zeros() as u8,
856                salt_size: 8,
857                _reserved_1: [0u8; 4],
858                file_size: 25u64.to_le_bytes(),
859                root_digest: [0u8; 64],
860                salt: [0u8; 32],
861                _reserved_2: [0u8; 144],
862            };
863            let mut buf = vec![0u8; 256];
864            descriptor.write_to_slice(buf.as_mut_slice()).expect("Writing out descriptor");
865            FsVerityDescriptor::from_bytes(buf.as_slice(), BLOCK_SIZE)
866                .expect_err("Block size mismatch");
867        }
868    }
869}