// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(target_endian = "big")]
compile_error!("This library assumes little-endian!");

pub mod builder;
mod format;
pub mod reader;

use crate::format::{ChunkHeader, SparseHeader, CHUNK_HEADER_SIZE, SPARSE_HEADER_SIZE};
use crate::reader::SparseReader;
use anyhow::{bail, ensure, Context, Result};
use core::fmt;
use serde::de::DeserializeOwned;
use std::fs::File;
use std::io::{Cursor, Read, Seek, SeekFrom, Write};
use std::path::Path;
use tempfile::{NamedTempFile, TempPath};

// Size of blocks to write.  Note that the format supports varied block sizes; this is the
// size preferred by this library.
const BLK_SIZE: u32 = 0x1000;

fn deserialize_from<T: DeserializeOwned, R: Read + ?Sized>(source: &mut R) -> Result<T> {
    let mut buf = vec![0u8; std::mem::size_of::<T>()];
    source.read_exact(&mut buf[..]).context("Failed to read bytes")?;
    Ok(bincode::deserialize(&buf[..])?)
}

/// A union trait for `Write` and `Seek` that also allows truncation.
pub trait Writer: Write + Seek {
    /// Sets the length of the output stream.
    fn set_len(&mut self, size: u64) -> Result<()>;
}

impl Writer for File {
    fn set_len(&mut self, size: u64) -> Result<()> {
        Ok(File::set_len(self, size)?)
    }
}

impl Writer for Cursor<Vec<u8>> {
    fn set_len(&mut self, size: u64) -> Result<()> {
        Vec::resize(self.get_mut(), size as usize, 0u8);
        Ok(())
    }
}

// A wrapper around a Reader which limits reads to a window of the underlying stream.  The
// underlying reader is still advanced upon reading.
// This is distinct from `std::io::Take` in that the limit (self.1) is an absolute end offset
// in the stream rather than a count of remaining bytes.  In other words, `LimitedReader` can
// be used to read a window within the reader (by seeking to the start of the window and
// setting the limit to its end offset).
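//
// For example (an illustrative sketch, not code taken from this crate): to expose bytes
// [16, 32) of `reader` as a 16-byte window, seek to the window start and set the limit to
// its absolute end offset:
//
//     reader.seek(SeekFrom::Start(16))?;
//     let mut window = LimitedReader(&mut reader, 32);
//     let mut buf = Vec::new();
//     window.read_to_end(&mut buf)?; // Reads at most 16 bytes.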
struct LimitedReader<'a, R>(pub &'a mut R, pub usize);

impl<'a, R: Read + Seek> Read for LimitedReader<'a, R> {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let offset = self.0.stream_position()?;
        let avail = self.1.saturating_sub(offset as usize);
        let to_read = std::cmp::min(avail, buf.len());
        self.0.read(&mut buf[..to_read])
    }
}

/// Returns whether the image in `reader` appears to be in the sparse format.
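///
/// # Example
///
/// A minimal usage sketch (marked `ignore`; the crate name `sparse` and the path are
/// assumptions for illustration):
///
/// ```ignore
/// use std::fs::File;
///
/// let mut image = File::open("disk.img").expect("open image");
/// if sparse::is_sparse_image(&mut image) {
///     println!("disk.img is an Android sparse image");
/// }
/// ```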
pub fn is_sparse_image<R: Read + Seek>(reader: &mut R) -> bool {
    || -> Option<bool> {
        let header: SparseHeader = deserialize_from(reader).ok()?;
        let is_sparse = header.magic == format::SPARSE_HEADER_MAGIC;
        reader.seek(SeekFrom::Start(0)).ok()?;
        Some(is_sparse)
    }()
    .unwrap_or(false)
}

#[derive(Clone, PartialEq, Debug)]
enum Chunk {
    /// `Raw` represents a set of blocks to be written to disk as-is.
    /// `start` is the offset in the expanded image at which the Raw section starts.
    /// `start` and `size` are in bytes, but must be block-aligned.
    Raw { start: u64, size: u64 },
    /// `Fill` represents a Chunk that has the `value` repeated enough to fill `size` bytes.
    /// `start` is the offset in the expanded image at which the Fill section starts.
    /// `start` and `size` are in bytes, but must be block-aligned.
    Fill { start: u64, size: u64, value: u32 },
    /// `DontCare` represents a set of blocks that need to be "offset" by the
    /// image recipient.  If an image needs to be broken up into two sparse images, and we flash n
    /// bytes for Sparse Image 1, Sparse Image 2 needs to start with a DontCareChunk with
    /// (n/blocksize) blocks as its "size" property.
    /// `start` is the offset in the expanded image at which the DontCare section starts.
    /// `start` and `size` are in bytes, but must be block-aligned.
    DontCare { start: u64, size: u64 },
    /// `Crc32` is used as a checksum of a given set of Chunks for a SparseImage.  This is not
    /// required and is unused in most implementations of the Sparse Image format.  The type is
    /// included for completeness.  It holds 4 bytes of CRC32 checksum, stored as a u32.
    #[allow(dead_code)]
    Crc32 { checksum: u32 },
}

impl Chunk {
    /// Attempts to read a `Chunk` from `reader`.  The reader will be positioned at the first byte
    /// following the chunk header and any extra data; for a Raw chunk this means it will point at
    /// the data payload, and for other chunks it will point at the next chunk header (or EOF).
    /// `offset` is the current offset in the logical volume.
    pub fn read_metadata<R: Read>(reader: &mut R, offset: u64, block_size: u32) -> Result<Self> {
        let header: ChunkHeader =
            deserialize_from(reader).context("Failed to read chunk header")?;
        ensure!(header.valid(), "Invalid chunk header");

        let size = header
            .chunk_sz
            .checked_mul(block_size)
            .context("Chunk size * block size must fit in a u32")?;
        match header.chunk_type {
            format::CHUNK_TYPE_RAW => Ok(Self::Raw { start: offset, size: size.into() }),
            format::CHUNK_TYPE_FILL => {
                let value: u32 =
                    deserialize_from(reader).context("Failed to deserialize fill value")?;
                Ok(Self::Fill { start: offset, size: size.into(), value })
            }
            format::CHUNK_TYPE_DONT_CARE => Ok(Self::DontCare { start: offset, size: size.into() }),
            format::CHUNK_TYPE_CRC32 => {
                let checksum: u32 =
                    deserialize_from(reader).context("Failed to deserialize checksum")?;
                Ok(Self::Crc32 { checksum })
            }
            // We already validated the chunk_type in `ChunkHeader::valid`.
            _ => unreachable!(),
        }
    }

    fn valid(&self, block_size: u32) -> bool {
        self.output_size() % (block_size as u64) == 0
    }

    /// Returns the offset into the logical image the chunk refers to, or None if the chunk has no
    /// output data.
    fn output_offset(&self) -> Option<u64> {
        match self {
            Self::Raw { start, .. } => Some(*start),
            Self::Fill { start, .. } => Some(*start),
            Self::DontCare { start, .. } => Some(*start),
            Self::Crc32 { .. } => None,
        }
    }

    /// Returns the number of bytes the chunk expands to when written to the partition.
    fn output_size(&self) -> u64 {
        match self {
            Self::Raw { size, .. } => *size,
            Self::Fill { size, .. } => *size,
            Self::DontCare { size, .. } => *size,
            Self::Crc32 { .. } => 0,
        }
    }

    /// Returns the number of blocks the chunk expands to when written to the partition.
    fn output_blocks(&self, block_size: u32) -> u32 {
        self.output_size().div_ceil(block_size as u64) as u32
    }

    /// `chunk_type` returns the integer flag to represent the type of chunk
    /// to use in the ChunkHeader
    fn chunk_type(&self) -> u16 {
        match self {
            Self::Raw { .. } => format::CHUNK_TYPE_RAW,
            Self::Fill { .. } => format::CHUNK_TYPE_FILL,
            Self::DontCare { .. } => format::CHUNK_TYPE_DONT_CARE,
            Self::Crc32 { .. } => format::CHUNK_TYPE_CRC32,
        }
    }

    /// `chunk_data_len` returns the length of the chunk's header plus the
    /// length of the data when serialized.
    ///
    /// This is the value encoded in the chunk header's `total_sz` field, which the
    /// format defines as a u32.  For example, a Fill chunk is a 12-byte chunk header
    /// plus a 4-byte fill value, i.e. 16 bytes in total.
    fn chunk_data_len(&self) -> u32 {
        let header_size = format::CHUNK_HEADER_SIZE;
        let data_size = match self {
            Self::Raw { size, .. } => *size as u32,
            Self::Fill { .. } => std::mem::size_of::<u32>() as u32,
            Self::DontCare { .. } => 0,
            Self::Crc32 { .. } => std::mem::size_of::<u32>() as u32,
        };
        header_size.checked_add(data_size).unwrap()
    }

    /// Writes the chunk to the given Writer.  `source` is a Reader containing the data payload for
    /// a Raw type chunk, with the seek offset pointing to the first byte of the data payload, and
    /// with exactly enough bytes available for the rest of the data payload.
    fn write<W: Write, R: Read>(
        &self,
        source: Option<&mut R>,
        dest: &mut W,
        block_size: u32,
    ) -> Result<()> {
        ensure!(self.valid(block_size), "Not writing invalid chunk");
        let header = ChunkHeader::new(
            self.chunk_type(),
            0x0,
            self.output_blocks(block_size),
            self.chunk_data_len(),
        );

        bincode::serialize_into(&mut *dest, &header)?;

        match self {
            Self::Raw { size, .. } => {
                ensure!(source.is_some(), "No source for Raw chunk");
                let n = std::io::copy(source.unwrap(), dest)?;
                let size = *size;
                if n < size {
                    let zeroes = vec![0u8; (size - n) as usize];
                    dest.write_all(&zeroes)?;
                }
            }
            Self::Fill { value, .. } => {
                // Serialize the value.
                bincode::serialize_into(dest, value)?;
            }
            Self::DontCare { .. } => {
                // DontCare has no data to write
            }
            Self::Crc32 { checksum } => {
                bincode::serialize_into(dest, checksum)?;
            }
        }
        Ok(())
    }
}

impl fmt::Display for Chunk {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let message = match self {
            Self::Raw { start, size } => {
                format!("RawChunk: start: {}, total bytes: {}", start, size)
            }
            Self::Fill { start, size, value } => {
                format!("FillChunk: start: {}, value: {}, bytes: {}", start, value, size)
            }
            Self::DontCare { start, size } => {
                format!("DontCareChunk: start: {}, bytes: {}", start, size)
            }
            Self::Crc32 { checksum } => format!("Crc32Chunk: checksum: {:?}", checksum),
        };
        write!(f, "{}", message)
    }
}

/// Chunk::write takes an Option of something that implements Read. The compiler still requires a
/// concrete type for the generic argument even when the Option is None. This constant can be used
/// in place of None to avoid having to specify a type for the source.
pub const NO_SOURCE: Option<&mut Cursor<&[u8]>> = None;

#[derive(Clone, Debug, PartialEq)]
struct SparseFileWriter {
    chunks: Vec<Chunk>,
}

impl SparseFileWriter {
    fn new(chunks: Vec<Chunk>) -> SparseFileWriter {
        SparseFileWriter { chunks }
    }

    fn total_blocks(&self) -> u32 {
        self.chunks.iter().map(|c| c.output_blocks(BLK_SIZE)).sum()
    }

    fn total_bytes(&self) -> u64 {
        self.chunks.iter().map(|c| c.output_size()).sum()
    }

    fn write<W: Write + Seek, R: Read + Seek>(&self, reader: &mut R, writer: &mut W) -> Result<()> {
        let header = SparseHeader::new(
            BLK_SIZE.try_into().unwrap(),          // Size of the blocks
            self.total_blocks(),                   // Total blocks in this image
            self.chunks.len().try_into().unwrap(), // Total chunks in this image
        );

        bincode::serialize_into(&mut *writer, &header)?;

        for chunk in &self.chunks {
            let mut reader = if let &Chunk::Raw { start, size } = chunk {
                reader.seek(SeekFrom::Start(start))?;
                Some(LimitedReader(reader, start as usize + size as usize))
            } else {
                None
            };
            chunk.write(reader.as_mut(), writer, BLK_SIZE)?;
        }

        Ok(())
    }
}

impl fmt::Display for SparseFileWriter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "SparseFileWriter: {} Chunks:", self.chunks.len())
    }
}

/// `add_sparse_chunk` takes the input vec `r` and the given `Chunk`, `chunk`, and
/// attempts to add the chunk to the end of the vec. If the current last chunk
/// is the same kind of Chunk as `chunk`, then it will merge the two chunks
/// into one chunk.
///
/// Example: if a `FillChunk` with value 0 and size 1 is the last chunk
/// in `r`, and `chunk` is a FillChunk with value 0 and size 1, then after this
/// call `r`'s last element will be a FillChunk with value 0 and size 2.
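///
/// A block-sized variant of that example as a sketch (not a doctest, since this is a
/// private helper):
///
/// ```ignore
/// let mut v = vec![Chunk::Fill { start: 0, size: 4096, value: 0 }];
/// add_sparse_chunk(&mut v, Chunk::Fill { start: 4096, size: 4096, value: 0 })?;
/// assert_eq!(v, [Chunk::Fill { start: 0, size: 8192, value: 0 }]);
/// ```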
fn add_sparse_chunk(r: &mut Vec<Chunk>, chunk: Chunk) -> Result<()> {
    match r.last_mut() {
        // We've got something in the Vec... if they are both the same type,
        // merge them, otherwise, just push the new one
        Some(last) => match (&last, &chunk) {
            (Chunk::Raw { start, size }, Chunk::Raw { size: new_length, .. })
                if size.checked_add(*new_length).is_some() =>
            {
                *last = Chunk::Raw { start: *start, size: size + new_length };
                return Ok(());
            }
            (
                Chunk::Fill { start, size, value },
                Chunk::Fill { size: new_size, value: new_value, .. },
            ) if value == new_value && size.checked_add(*new_size).is_some() => {
                *last = Chunk::Fill { start: *start, size: size + new_size, value: *value };
                return Ok(());
            }
            (Chunk::DontCare { start, size }, Chunk::DontCare { size: new_size, .. })
                if size.checked_add(*new_size).is_some() =>
            {
                *last = Chunk::DontCare { start: *start, size: size + new_size };
                return Ok(());
            }
            _ => {}
        },
        None => {}
    }

    // If the chunk types differ they cannot be merged.
    // If they are both Fill but have different values, they cannot be merged.
    // Crc32 cannot be merged.
    // If we don't have any chunks then we add it
    r.push(chunk);
    Ok(())
}

/// Reads a sparse image from `source` and expands it to its unsparsed representation in `dest`.
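///
/// # Example
///
/// A minimal sketch of expanding a sparse image into a regular file (marked `ignore`;
/// the crate name `sparse` and the paths are assumptions for illustration):
///
/// ```ignore
/// use std::fs::File;
///
/// let mut source = File::open("disk.sparse.img").expect("open sparse image");
/// let mut dest = File::create("disk.img").expect("create output file");
/// sparse::unsparse(&mut source, &mut dest).expect("unsparse failed");
/// ```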
pub fn unsparse<W: Writer, R: Read + Seek>(source: &mut R, dest: &mut W) -> Result<()> {
    let header: SparseHeader = deserialize_from(source).context("Failed to read header")?;
    ensure!(header.valid(), "Invalid sparse image header {:?}", header);

    for _ in 0..header.total_chunks {
        expand_chunk(source, dest, header.blk_sz).context("Failed to expand chunk")?;
    }
    // Truncate output to its current seek offset, in case the last chunk we wrote was DontCare.
    let offset = dest.stream_position()?;
    dest.set_len(offset).context("Failed to truncate output")?;
    dest.flush()?;
    Ok(())
}

/// Reads a chunk from `source`, and expands it, writing the result to `dest`.
fn expand_chunk<R: Read + Seek, W: Write + Seek>(
    source: &mut R,
    dest: &mut W,
    block_size: u32,
) -> Result<()> {
    let header: ChunkHeader =
        deserialize_from(source).context("Failed to deserialize chunk header")?;
    ensure!(header.valid(), "Invalid chunk header {:x?}", header);
    let size = header
        .chunk_sz
        .checked_mul(block_size)
        .context("Chunk size * block size must fit in a u32")? as usize;
    match header.chunk_type {
        format::CHUNK_TYPE_RAW => {
            let limit = source.stream_position()? as usize + size;
            std::io::copy(&mut LimitedReader(source, limit), dest)
                .context("Failed to copy contents")?;
        }
        format::CHUNK_TYPE_FILL => {
            let value: [u8; 4] =
                deserialize_from(source).context("Failed to deserialize fill value")?;
            assert!(size % 4 == 0);
            let repeated = value.repeat(size / 4);
            dest.write_all(&repeated).context("Failed to fill contents")?;
        }
        format::CHUNK_TYPE_DONT_CARE => {
            dest.seek(SeekFrom::Current(size as i64)).context("Failed to skip contents")?;
        }
        format::CHUNK_TYPE_CRC32 => {
            let _: u32 = deserialize_from(source).context("Failed to deserialize checksum")?;
        }
        _ => bail!("Invalid type {}", header.chunk_type),
    };
    Ok(())
}

/// `resparse` takes a SparseFileWriter and a maximum size and will
/// break the single SparseFileWriter into multiple SparseFileWriters whose
/// serialized size will not exceed `max_download_size`.
///
/// This will return an error if `max_download_size` is <= BLK_SIZE.
fn resparse(
    sparse_file: SparseFileWriter,
    max_download_size: u64,
) -> Result<Vec<SparseFileWriter>> {
    if max_download_size <= BLK_SIZE as u64 {
        anyhow::bail!(
            "Given maximum download size ({}) is less than the block size ({})",
            max_download_size,
            BLK_SIZE
        );
    }
    let mut ret = Vec::<SparseFileWriter>::new();

    // File length already starts with a header for the SparseFile as
    // well as the size of a potential DontCare and Crc32 Chunk
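    // For example, assuming the standard Android sparse format sizes (a 28-byte sparse
    // header and 12-byte chunk headers), this reserves 28 + 12 + (12 + 4) = 56 bytes.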
    let sunk_file_length = format::SPARSE_HEADER_SIZE as u64
        + Chunk::DontCare { start: 0, size: BLK_SIZE.into() }.chunk_data_len() as u64
        + Chunk::Crc32 { checksum: 2345 }.chunk_data_len() as u64;

    let mut chunk_pos = 0;
    let mut output_offset = 0;
    while chunk_pos < sparse_file.chunks.len() {
        log::trace!("Starting a new file at chunk position: {}", chunk_pos);

        let mut file_len = 0;
        file_len += sunk_file_length;

        let mut chunks = Vec::<Chunk>::new();
        if chunk_pos > 0 {
            // If we already have some chunks... add a DontCare block to
            // move the pointer
            log::trace!("Adding a DontCare chunk at chunk position: {}", chunk_pos);
            let dont_care = Chunk::DontCare { start: 0, size: output_offset };
            chunks.push(dont_care);
        }

        loop {
            match sparse_file.chunks.get(chunk_pos) {
                Some(chunk) => {
                    let curr_chunk_data_len = chunk.chunk_data_len() as u64;
                    if (file_len + curr_chunk_data_len) > max_download_size {
                        log::trace!(
                            "Current file size is: {} and adding another chunk of len: {} would \
                             put us over our max: {}",
                            file_len,
                            curr_chunk_data_len,
                            max_download_size
                        );

                        // Add a don't care chunk to cover everything to the end of the image. While
                        // this is not strictly speaking needed, other tools (simg2simg) produce
                        // this chunk, and the Sparse image inspection tool simg_dump will produce a
                        // warning if a sparse file does not have the same number of output blocks
                        // as declared in the header.
                        let remainder_size = sparse_file.total_bytes() - output_offset;
                        let dont_care =
                            Chunk::DontCare { start: output_offset, size: remainder_size };
                        chunks.push(dont_care);
                        break;
                    }
                    log::trace!(
                        "chunk: {} curr_chunk_data_len: {} current file size: {} \
                         max_download_size: {} diff: {}",
                        chunk_pos,
                        curr_chunk_data_len,
                        file_len,
                        max_download_size,
                        (max_download_size - file_len - curr_chunk_data_len)
                    );
                    add_sparse_chunk(&mut chunks, chunk.clone())?;
                    file_len += curr_chunk_data_len;
                    chunk_pos += 1;
                    output_offset += chunk.output_size();
                }
                None => {
                    log::trace!("Finished iterating chunks");
                    break;
                }
            }
        }
        let resparsed = SparseFileWriter::new(chunks);
        log::trace!("resparse: Adding new SparseFile: {}", resparsed);
        ret.push(resparsed);
    }

    Ok(ret)
}

/// Takes the provided `reader` and generates a set of temporary files in `dir`
/// in the Sparse image format, with the provided `max_download_size`
/// constraining the size of each file.
///
/// # Arguments
///
/// * `reader` - The Sparse Reader of a Sparse File.
/// * `dir` - Path to the directory to write the Sparse file(s).
/// * `max_download_size` - Maximum size that can be downloaded by the device.
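///
/// # Example
///
/// A minimal sketch (marked `ignore`; the crate name `sparse` and the paths are
/// assumptions for illustration):
///
/// ```ignore
/// use sparse::reader::SparseReader;
/// use std::fs::File;
///
/// let image = File::open("super.sparse.img").expect("open sparse image");
/// let mut reader = SparseReader::new(image).expect("parse sparse image");
/// let dir = tempfile::tempdir().expect("create temp dir");
/// let files = sparse::resparse_sparse_img(&mut reader, dir.path(), 64 * 1024 * 1024)
///     .expect("resparse failed");
/// println!("created {} sparse file(s)", files.len());
/// ```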
pub fn resparse_sparse_img<R: Read + std::io::Seek>(
    reader: &mut SparseReader<R>,
    dir: &Path,
    max_download_size: u64,
) -> Result<Vec<TempPath>> {
    log::debug!("Building writer from Reader");
    let mut chunks = vec![];
    // The sparse image we are reading from already has a header and chunk headers in it;
    // we need the offsets we compute to reflect that.
    let header_sunk = SPARSE_HEADER_SIZE as u64;
    let mut raw_chunks_encountered = 0;
    for (chunk, offset) in reader.chunks() {
        log::info!("resparse_sparse_img. Processing chunk: {} with offset: {:#?}", chunk, offset);
        if chunk.chunk_type() == format::CHUNK_TYPE_RAW {
            raw_chunks_encountered += 1;
            // This is a raw chunk. We'll split it up into blocks
            let blks = chunk.output_blocks(BLK_SIZE);
            log::trace!("resparse_sparse_img: splitting RAW chunk into {} chunks", blks);
            let sunk: u64 = header_sunk + (raw_chunks_encountered * CHUNK_HEADER_SIZE) as u64;
            for i in 0..blks {
                let start: u64 = offset.unwrap_or(0) + (BLK_SIZE * i) as u64 - sunk;
                log::debug!("resparse_sparse_img: adding RAW chunk at start: {}", start);
                chunks.push(Chunk::Raw { start, size: BLK_SIZE.into() })
            }
        } else {
            chunks.push(chunk.clone());
        }
    }
    let sparse_file = SparseFileWriter::new(chunks);

    let mut ret = Vec::<TempPath>::new();
    log::debug!("Resparsing sparse file");
    for re_sparsed_file in resparse(sparse_file, max_download_size)? {
        let (file, temp_path) = NamedTempFile::new_in(dir)?.into_parts();
        let mut file_create = File::from(file);

        log::debug!("Writing resparsed {} to disk", re_sparsed_file);
        re_sparsed_file.write(reader, &mut file_create)?;

        ret.push(temp_path);
    }

    log::debug!("Finished building sparse files");

    Ok(ret)
}

/// Takes the given `file_to_upload` for the partition `name` and creates a
/// set of temporary files in the given `dir` in Sparse Image Format, with the
/// provided `max_download_size` constraining the size of each file.
///
/// # Arguments
///
/// * `name` - Name of the partition for the image. Used for logging only.
/// * `file_to_upload` - Path to the file to translate to sparse image format.
/// * `dir` - Path to write the Sparse file(s).
/// * `max_download_size` - Maximum size that can be downloaded by the device.
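///
/// # Example
///
/// A minimal sketch (marked `ignore`; the crate name `sparse`, the partition name, and
/// the paths are assumptions for illustration):
///
/// ```ignore
/// let dir = tempfile::tempdir().expect("create temp dir");
/// let files = sparse::build_sparse_files(
///     "system",            // partition name, used only for logging
///     "system.img",        // raw image to convert
///     dir.path(),          // where the sparse pieces are written
///     256 * 1024 * 1024,   // device max download size in bytes
/// )
/// .expect("build sparse files");
/// println!("created {} sparse file(s)", files.len());
/// ```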
pub fn build_sparse_files(
    name: &str,
    file_to_upload: &str,
    dir: &Path,
    max_download_size: u64,
) -> Result<Vec<TempPath>> {
    if max_download_size <= BLK_SIZE as u64 {
        anyhow::bail!(
            "Given maximum download size ({}) is less than the block size ({})",
            max_download_size,
            BLK_SIZE
        );
    }
    log::debug!("Building sparse files for: {}. File: {}", name, file_to_upload);
    let mut in_file = File::open(file_to_upload)?;

    let mut total_read: usize = 0;
    // Preallocate vector to avoid reallocations as it grows.
    let mut chunks =
        Vec::<Chunk>::with_capacity((in_file.metadata()?.len() as usize / BLK_SIZE as usize) + 1);

    let mut buf = [0u8; BLK_SIZE as usize];
    loop {
        let read = in_file.read(&mut buf)?;
        if read == 0 {
            break;
        }

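        // A block can be emitted as a Fill chunk when it is one 4-byte word repeated for
        // the whole block; compare each 4-byte word with its neighbour to check that.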
        let is_fill = buf.chunks(4).collect::<Vec<&[u8]>>().windows(2).all(|w| w[0] == w[1]);

        if is_fill {
            // The Android Sparse Image Format specifies that a fill block
            // is a four-byte u32 repeated to fill BLK_SIZE. Here we use
            // bincode::deserialize to get the repeated four byte pattern from
            // the buffer so that it can be serialized later when we write
            // the sparse file with bincode::serialize.
            let value: u32 = bincode::deserialize(&buf[0..4])?;
            // Add a fill chunk
            let fill = Chunk::Fill {
                start: total_read as u64,
                size: buf.len().try_into().unwrap(),
                value,
            };
            log::trace!("Sparsing file: {}. Created: {}", file_to_upload, fill);
            chunks.push(fill);
        } else {
            // Add a raw chunk
            let raw = Chunk::Raw { start: total_read as u64, size: buf.len().try_into().unwrap() };
            log::trace!("Sparsing file: {}. Created: {}", file_to_upload, raw);
            chunks.push(raw);
            if read < buf.len() {
                // We've reached the end of the file; add a DontCare chunk to
                // skip the last bit of the file, which is zeroed out from the
                // previous raw buffer.
                let skip_end =
                    Chunk::DontCare { start: (total_read + read) as u64, size: BLK_SIZE.into() };
                chunks.push(skip_end);
            }
        }
        total_read += read;
    }

    log::trace!("Creating sparse file from: {} chunks", chunks.len());

    // At this point we are making a new sparse file from an unoptimized set of
    // Chunks. This primarily means that adjacent Fill chunks of the same value are
    // not collapsed into a single Fill chunk (with a larger size). The advantage
    // of this two-pass approach is that (with some future work) we can create
    // the "unoptimized" sparse file from a given image, and then "resparse" it
    // as many times as desired with different `max_download_size` parameters.
    // This would simplify the scenario where we want to flash the same image
    // to multiple physical devices which may have slight differences in their
    // hardware (and therefore different `max_download_size` values).
    let sparse_file = SparseFileWriter::new(chunks);
    log::trace!("Created sparse file: {}", sparse_file);

    let mut ret = Vec::<TempPath>::new();
    log::trace!("Resparsing sparse file");
    for re_sparsed_file in resparse(sparse_file, max_download_size)? {
        let (file, temp_path) = NamedTempFile::new_in(dir)?.into_parts();
        let mut file_create = File::from(file);

        log::trace!("Writing resparsed {} to disk", re_sparsed_file);
        re_sparsed_file.write(&mut in_file, &mut file_create)?;

        ret.push(temp_path);
    }

    log::debug!("Finished building sparse files");

    Ok(ret)
}

////////////////////////////////////////////////////////////////////////////////
// tests

#[cfg(test)]
mod test {
    #[cfg(target_os = "linux")]
    use crate::build_sparse_files;

    use super::builder::{DataSource, SparseImageBuilder};
    use super::{
        add_sparse_chunk, resparse, unsparse, Chunk, SparseFileWriter, BLK_SIZE, NO_SOURCE,
    };
    use rand::rngs::SmallRng;
    use rand::{RngCore, SeedableRng};
    use std::io::{Cursor, Read as _, Seek as _, SeekFrom, Write as _};
    #[cfg(target_os = "linux")]
    use std::path::Path;
    #[cfg(target_os = "linux")]
    use std::process::{Command, Stdio};
    use tempfile::{NamedTempFile, TempDir};

    #[test]
    fn test_fill_into_bytes() {
        let mut dest = Cursor::new(Vec::<u8>::new());

        let fill_chunk = Chunk::Fill { start: 0, size: (5 * BLK_SIZE).into(), value: 365 };
        fill_chunk.write(NO_SOURCE, &mut dest, BLK_SIZE).unwrap();
        assert_eq!(dest.into_inner(), [194, 202, 0, 0, 5, 0, 0, 0, 16, 0, 0, 0, 109, 1, 0, 0]);
    }

    #[test]
    fn test_raw_into_bytes() {
        const EXPECTED_RAW_BYTES: [u8; 22] =
            [193, 202, 0, 0, 1, 0, 0, 0, 12, 16, 0, 0, 49, 50, 51, 52, 53, 0, 0, 0, 0, 0];

        let mut source = Cursor::new(Vec::<u8>::from(&b"12345"[..]));
        let mut sparse = Cursor::new(Vec::<u8>::new());
        let chunk = Chunk::Raw { start: 0, size: BLK_SIZE.into() };

        chunk.write(Some(&mut source), &mut sparse, BLK_SIZE).unwrap();
        let buf = sparse.into_inner();
        assert_eq!(buf.len(), 4108);
        assert_eq!(&buf[..EXPECTED_RAW_BYTES.len()], EXPECTED_RAW_BYTES);
        assert_eq!(&buf[EXPECTED_RAW_BYTES.len()..], &[0u8; 4108 - EXPECTED_RAW_BYTES.len()]);
    }

    #[test]
    fn test_dont_care_into_bytes() {
        let mut dest = Cursor::new(Vec::<u8>::new());
        let chunk = Chunk::DontCare { start: 0, size: (5 * BLK_SIZE).into() };

        chunk.write(NO_SOURCE, &mut dest, BLK_SIZE).unwrap();
        assert_eq!(dest.into_inner(), [195, 202, 0, 0, 5, 0, 0, 0, 12, 0, 0, 0]);
    }

    #[test]
    fn test_sparse_file_into_bytes() {
        let mut source = Cursor::new(Vec::<u8>::from(&b"123"[..]));
        let mut sparse = Cursor::new(Vec::<u8>::new());
        let mut chunks = Vec::<Chunk>::new();
        // Add a fill chunk
        let fill = Chunk::Fill { start: 0, size: 4096, value: 5 };
        chunks.push(fill);
        // Add a raw chunk
        let raw = Chunk::Raw { start: 0, size: 12288 };
        chunks.push(raw);
        // Add a dontcare chunk
        let dontcare = Chunk::DontCare { start: 0, size: 4096 };
        chunks.push(dontcare);

        let sparsefile = SparseFileWriter::new(chunks);
        sparsefile.write(&mut source, &mut sparse).unwrap();

        sparse.seek(SeekFrom::Start(0)).unwrap();
        let mut unsparsed = Cursor::new(Vec::<u8>::new());
        unsparse(&mut sparse, &mut unsparsed).unwrap();
        let buf = unsparsed.into_inner();
        assert_eq!(buf.len(), 4096 + 12288 + 4096);
        {
            let chunks = buf[..4096].chunks(4);
            for chunk in chunks {
                assert_eq!(chunk, &[5u8, 0, 0, 0]);
            }
        }
        assert_eq!(&buf[4096..4099], b"123");
        assert_eq!(&buf[4099..16384], &[0u8; 12285]);
        assert_eq!(&buf[16384..], &[0u8; 4096]);
    }

    ////////////////////////////////////////////////////////////////////////////
    // Tests for resparse

    #[test]
    fn test_resparse_bails_on_too_small_size() {
        let sparse = SparseFileWriter::new(Vec::<Chunk>::new());
        assert!(resparse(sparse, 4095).is_err());
    }

    #[test]
    fn test_resparse_splits() {
        let max_download_size = 4096 * 2;

        let mut chunks = Vec::<Chunk>::new();
        chunks.push(Chunk::Raw { start: 0, size: 4096 });
        chunks.push(Chunk::Fill { start: 4096, size: 4096, value: 2 });
        // We want 2 sparse files with the second sparse file having a
        // DontCare chunk and then this chunk
        chunks.push(Chunk::Raw { start: 8192, size: 4096 });

        let input_sparse_file = SparseFileWriter::new(chunks);
        let resparsed_files = resparse(input_sparse_file, max_download_size).unwrap();
        assert_eq!(2, resparsed_files.len());

        assert_eq!(3, resparsed_files[0].chunks.len());
        assert_eq!(Chunk::Raw { start: 0, size: 4096 }, resparsed_files[0].chunks[0]);
        assert_eq!(Chunk::Fill { start: 4096, size: 4096, value: 2 }, resparsed_files[0].chunks[1]);
        assert_eq!(Chunk::DontCare { start: 8192, size: 4096 }, resparsed_files[0].chunks[2]);

        assert_eq!(2, resparsed_files[1].chunks.len());
        assert_eq!(Chunk::DontCare { start: 0, size: 8192 }, resparsed_files[1].chunks[0]);
        assert_eq!(Chunk::Raw { start: 8192, size: 4096 }, resparsed_files[1].chunks[1]);
    }

    ////////////////////////////////////////////////////////////////////////////
    // Tests for add_sparse_chunk

    #[test]
    fn test_add_sparse_chunk_adds_empty() {
        let init_vec = Vec::<Chunk>::new();
        let mut res = init_vec.clone();
        add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
        assert_eq!(0, init_vec.len());
        assert_ne!(init_vec, res);
        assert_eq!(Chunk::Fill { start: 0, size: 4096, value: 1 }, res[0]);
    }

    #[test]
    fn test_add_sparse_chunk_fill() {
        // Test they merge.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Fill { start: 0, size: 8192, value: 1 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 8192, value: 1 }).unwrap();
            assert_eq!(1, res.len());
            assert_eq!(Chunk::Fill { start: 0, size: 16384, value: 1 }, res[0]);
        }

        // Test don't merge on different value.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 1 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 2 }).unwrap();
            assert_ne!(res, init_vec);
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::Fill { start: 0, size: 4096, value: 1 },
                    Chunk::Fill { start: 0, size: 4096, value: 2 }
                ]
            );
        }

        // Test don't merge on different type.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 2 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: 4096 }).unwrap();
            assert_ne!(res, init_vec);
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::Fill { start: 0, size: 4096, value: 2 },
                    Chunk::DontCare { start: 0, size: 4096 }
                ]
            );
        }

        // Test don't merge when too large.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Fill { start: 0, size: 4096, value: 1 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: u64::MAX - 4095, value: 1 })
                .unwrap();
            assert_ne!(res, init_vec);
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::Fill { start: 0, size: 4096, value: 1 },
                    Chunk::Fill { start: 0, size: u64::MAX - 4095, value: 1 }
                ]
            );
        }
    }

    #[test]
    fn test_add_sparse_chunk_dont_care() {
        // Test they merge.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: 4096 }).unwrap();
            assert_eq!(1, res.len());
            assert_eq!(Chunk::DontCare { start: 0, size: 8192 }, res[0]);
        }

        // Test they don't merge on different type.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::DontCare { start: 0, size: 4096 },
                    Chunk::Fill { start: 0, size: 4096, value: 1 }
                ]
            );
        }

        // Test they don't merge when too large.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::DontCare { start: 0, size: 4096 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::DontCare { start: 0, size: u64::MAX - 4095 })
                .unwrap();
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::DontCare { start: 0, size: 4096 },
                    Chunk::DontCare { start: 0, size: u64::MAX - 4095 }
                ]
            );
        }
    }

    #[test]
    fn test_add_sparse_chunk_raw() {
        // Test they merge.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Raw { start: 0, size: 12288 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Raw { start: 0, size: 16384 }).unwrap();
            assert_eq!(1, res.len());
            assert_eq!(Chunk::Raw { start: 0, size: 28672 }, res[0]);
        }

        // Test they don't merge on different type.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Raw { start: 0, size: 12288 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 3, size: 8192, value: 1 }).unwrap();
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::Raw { start: 0, size: 12288 },
                    Chunk::Fill { start: 3, size: 8192, value: 1 }
                ]
            );
        }

        // Test they don't merge when too large.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Raw { start: 0, size: 4096 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Raw { start: 0, size: u64::MAX - 4095 }).unwrap();
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [
                    Chunk::Raw { start: 0, size: 4096 },
                    Chunk::Raw { start: 0, size: u64::MAX - 4095 }
                ]
            );
        }
    }

    #[test]
    fn test_add_sparse_chunk_crc32() {
        // Test they don't merge on same type (Crc32 is special).
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Crc32 { checksum: 1234 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Crc32 { checksum: 2345 }).unwrap();
            assert_eq!(2, res.len());
            assert_eq!(res, [Chunk::Crc32 { checksum: 1234 }, Chunk::Crc32 { checksum: 2345 }]);
        }

        // Test they don't merge on different type.
        {
            let mut init_vec = Vec::<Chunk>::new();
            init_vec.push(Chunk::Crc32 { checksum: 1234 });
            let mut res = init_vec.clone();
            add_sparse_chunk(&mut res, Chunk::Fill { start: 0, size: 4096, value: 1 }).unwrap();
            assert_eq!(2, res.len());
            assert_eq!(
                res,
                [Chunk::Crc32 { checksum: 1234 }, Chunk::Fill { start: 0, size: 4096, value: 1 }]
            );
        }
    }

    ////////////////////////////////////////////////////////////////////////////
    // Integration
    //

    #[test]
    fn test_roundtrip() {
        let tmpdir = TempDir::new().unwrap();

        // Generate a large temporary file
        let (mut file, _temp_path) = NamedTempFile::new_in(&tmpdir).unwrap().into_parts();
        let mut rng = SmallRng::from_entropy();
        let mut buf = Vec::<u8>::new();
        buf.resize(1 * 4096, 0);
        rng.fill_bytes(&mut buf);
        file.write_all(&buf).unwrap();
        file.flush().unwrap();
        file.seek(SeekFrom::Start(0)).unwrap();
        let content_size = buf.len();

        // build a sparse file
        let mut sparse_file = NamedTempFile::new_in(&tmpdir).unwrap().into_file();
        SparseImageBuilder::new()
            .add_chunk(DataSource::Buffer(Box::new([0xffu8; 8192])))
            .add_chunk(DataSource::Reader { reader: Box::new(file), size: content_size as u64 })
            .add_chunk(DataSource::Fill(0xaaaa_aaaau32, 1024))
            .add_chunk(DataSource::Skip(16384))
            .build(&mut sparse_file)
            .expect("Build sparse image failed");
        sparse_file.seek(SeekFrom::Start(0)).unwrap();

        let mut orig_file = NamedTempFile::new_in(&tmpdir).unwrap().into_file();
        unsparse(&mut sparse_file, &mut orig_file).expect("unsparse failed");
        orig_file.seek(SeekFrom::Start(0)).unwrap();

        let mut unsparsed_bytes = vec![];
        orig_file.read_to_end(&mut unsparsed_bytes).expect("Failed to read unsparsed image");
        assert_eq!(unsparsed_bytes.len(), 8192 + 20480 + content_size);
        assert_eq!(&unsparsed_bytes[..8192], &[0xffu8; 8192]);
        assert_eq!(&unsparsed_bytes[8192..8192 + content_size], &buf[..]);
        assert_eq!(&unsparsed_bytes[8192 + content_size..12288 + content_size], &[0xaau8; 4096]);
        assert_eq!(&unsparsed_bytes[12288 + content_size..], &[0u8; 16384]);
    }

    #[test]
    /// test_with_simg2img is a "round trip" test that does the following
    ///
    /// 1. Generates a pseudorandom temporary file
    /// 2. Builds sparse files out of it
    /// 3. Uses the android tool simg2img to take the sparse files and generate
    ///    the "original" image file out of them.
    /// 4. Asserts the originally created file and the one created by simg2img
    ///    have binary equivalent contents.
    ///
    /// This gives us a reasonable expectation of correctness given that the
    /// Android-provided sparse tools are able to interpret our sparse images.
    #[cfg(target_os = "linux")]
    fn test_with_simg2img() {
        let simg2img_path = Path::new("./host_x64/test_data/storage/sparse/simg2img");
        assert!(
            Path::exists(simg2img_path),
            "simg2img binary must exist at {}",
            simg2img_path.display()
        );

        let tmpdir = TempDir::new().unwrap();

        // Generate a large temporary file
        let (mut file, temp_path) = NamedTempFile::new_in(&tmpdir).unwrap().into_parts();
        let mut rng = SmallRng::from_entropy();
        let mut buf = Vec::<u8>::new();
        // Don't want it to neatly fit a block size
        buf.resize(50 * 4096 + 1244, 0);
        rng.fill_bytes(&mut buf);
        file.write_all(&buf).unwrap();
        file.flush().unwrap();
        file.seek(SeekFrom::Start(0)).unwrap();

        // build a sparse file
        let files = build_sparse_files(
            "test",
            temp_path.to_path_buf().to_str().expect("Should succeed"),
            tmpdir.path(),
            4096 * 2,
        )
        .unwrap();

        let mut simg2img_output = tmpdir.path().to_path_buf();
        simg2img_output.push("output");

        let mut simg2img = Command::new(simg2img_path)
            .args(&files[..])
            .arg(&simg2img_output)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .expect("Failed to spawn simg2img");
        let res = simg2img.wait().expect("failed to wait for simg2img");
        assert!(res.success(), "simg2img did not succeed");
        let mut simg2img_stdout = simg2img.stdout.take().expect("Get stdout from simg2img");
        let mut simg2img_stderr = simg2img.stderr.take().expect("Get stderr from simg2img");

        let mut stdout = String::new();
        simg2img_stdout.read_to_string(&mut stdout).expect("Reading simg2img stdout");
        assert_eq!(stdout, "");

        let mut stderr = String::new();
        simg2img_stderr.read_to_string(&mut stderr).expect("Reading simg2img stderr");
        assert_eq!(stderr, "");

        let simg2img_output_bytes =
            std::fs::read(simg2img_output).expect("Failed to read simg2img output");

        assert_eq!(
            buf,
            simg2img_output_bytes[0..buf.len()],
            "Output from simg2img should match our generated file"
        );

        assert_eq!(
            simg2img_output_bytes[buf.len()..],
            vec![0u8; simg2img_output_bytes.len() - buf.len()],
            "The remainder of our simg2img_output_bytes should be 0"
        );
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn test_resparse_from_sparse() {
        use crate::reader::SparseReader;
        use crate::resparse_sparse_img;

        let simg2img_path = Path::new("./host_x64/test_data/storage/sparse/simg2img");
        assert!(
            Path::exists(simg2img_path),
            "simg2img binary must exist at {}",
            simg2img_path.display()
        );

        let tmpdir = TempDir::new().unwrap();

        // Generate a large temporary file
        let (mut file, _temp_path) = NamedTempFile::new_in(&tmpdir).unwrap().into_parts();
        let mut rng = SmallRng::from_entropy();
        let mut buf = Vec::<u8>::new();
        buf.resize(10 * 4096, 0);
        rng.fill_bytes(&mut buf);
        file.write_all(&buf).unwrap();
        file.flush().unwrap();
        file.seek(SeekFrom::Start(0)).unwrap();
        let content_size = buf.len();

        // build a sparse file
        let sparse_file_tmp = NamedTempFile::new_in(&tmpdir).unwrap();
        let mut sparse_file = sparse_file_tmp.into_file();
        SparseImageBuilder::new()
            .add_chunk(DataSource::Buffer(Box::new([0xffu8; 4096 * 2])))
            .add_chunk(DataSource::Reader { reader: Box::new(file), size: content_size as u64 })
            .add_chunk(DataSource::Fill(0xaaaa_aaaau32, 1024))
            .add_chunk(DataSource::Skip(16384))
            .build(&mut sparse_file)
            .expect("Build sparse image failed");
        sparse_file.seek(SeekFrom::Start(0)).unwrap();

        let mut reader = SparseReader::new(sparse_file).expect("create reader");

        let files = resparse_sparse_img(&mut reader, tmpdir.path(), 4096 * 3).unwrap();

        // Re build the image from the sparse files
        let mut simg2img_output_sparsed = tmpdir.path().to_path_buf();
        simg2img_output_sparsed.push("output_sparsed");

        let mut simg2img_sparsed = Command::new(simg2img_path)
            .args(&files[..])
            .arg(&simg2img_output_sparsed)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .expect("Failed to spawn simg2img");
        let res = simg2img_sparsed.wait().expect("failed to wait for simg2img");
        assert!(res.success(), "simg2img did not succeed");
        let mut simg2img_stdout = simg2img_sparsed.stdout.take().expect("Get stdout from simg2img");
        let mut simg2img_stderr = simg2img_sparsed.stderr.take().expect("Get stderr from simg2img");

        let mut stdout = String::new();
        simg2img_stdout.read_to_string(&mut stdout).expect("Reading simg2img stdout");
        assert_eq!(stdout, "");

        let mut stderr = String::new();
        simg2img_stderr.read_to_string(&mut stderr).expect("Reading simg2img stderr");
        assert_eq!(stderr, "");
    }
}