1use crate::compression::{ChunkedArchive, ChunkedDecompressor};
25use crate::format::SerializedType1Blob;
26use serde::{Deserialize, Serialize};
27use static_assertions::assert_eq_size;
28use thiserror::Error;
29use zerocopy::{IntoBytes, Ref};
30
/// Compression support (chunked archives) used for delivery blob payloads.
pub mod compression;
// Serialized on-disk header formats; kept private to this crate.
mod format;

// Length fields are freely cast between `usize` and `u64` in this crate (e.g.
// `payload_length as u64`); statically require a 64-bit target so those casts
// cannot truncate.
assert_eq_size!(usize, u64);
36
37pub const DELIVERY_PATH_PREFIX: &'static str = "v1-";
39
40pub fn generate(delivery_type: DeliveryBlobType, data: &[u8]) -> Vec<u8> {
42 match delivery_type {
43 DeliveryBlobType::Type1 => Type1Blob::generate(data, CompressionMode::Attempt),
44 _ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
45 }
46}
47
48pub fn generate_to(
51 delivery_type: DeliveryBlobType,
52 data: &[u8],
53 writer: impl std::io::Write,
54) -> Result<(), std::io::Error> {
55 match delivery_type {
56 DeliveryBlobType::Type1 => Type1Blob::generate_to(data, CompressionMode::Attempt, writer),
57 _ => panic!("Unsupported delivery blob type: {:?}", delivery_type),
58 }
59}
60
61pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
63 let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
64 match header.delivery_type {
65 DeliveryBlobType::Type1 => Type1Blob::decompressed_size(delivery_blob),
66 _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
67 }
68}
69
70pub fn decompressed_size_from_reader(
72 mut reader: impl std::io::Read,
73) -> Result<u64, DecompressError> {
74 let mut buf = vec![];
75 loop {
76 let already_read = buf.len();
77 let new_size = already_read + 4096;
78 buf.resize(new_size, 0);
79 let new_size = already_read + reader.read(&mut buf[already_read..new_size])?;
80 if new_size == already_read {
81 return Err(DecompressError::NeedMoreData);
82 }
83 buf.truncate(new_size);
84 match decompressed_size(&buf) {
85 Ok(size) => {
86 return Ok(size);
87 }
88 Err(DecompressError::NeedMoreData) => {}
89 Err(e) => {
90 return Err(e);
91 }
92 }
93 }
94}
95
96pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
98 let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
99 match header.delivery_type {
100 DeliveryBlobType::Type1 => Type1Blob::decompress(delivery_blob),
101 _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
102 }
103}
104
105pub fn decompress_to(
108 delivery_blob: &[u8],
109 writer: impl std::io::Write,
110) -> Result<(), DecompressError> {
111 let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
112 match header.delivery_type {
113 DeliveryBlobType::Type1 => Type1Blob::decompress_to(delivery_blob, writer),
114 _ => Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
115 }
116}
117
118pub fn calculate_digest(delivery_blob: &[u8]) -> Result<fuchsia_merkle::Hash, DecompressError> {
121 let mut writer = fuchsia_merkle::MerkleTreeWriter::new(std::io::sink());
122 let header = DeliveryBlobHeader::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
123 match header.delivery_type {
124 DeliveryBlobType::Type1 => {
125 let () = Type1Blob::decompress_to(delivery_blob, &mut writer)?;
126 }
127 _ => return Err(DecompressError::DeliveryBlob(DeliveryBlobError::InvalidType)),
128 }
129 Ok(writer.finish().root())
130}
131
132pub fn delivery_blob_path(blob_name: impl std::fmt::Display) -> String {
134 format!("{}{}", DELIVERY_PATH_PREFIX, blob_name)
135}
136
/// Errors produced while parsing or validating a delivery blob.
#[derive(Clone, Copy, Debug, Eq, Error, PartialEq)]
pub enum DeliveryBlobError {
    /// The header specified a delivery blob type this library does not support.
    #[error("Invalid or unsupported delivery blob type.")]
    InvalidType,

    /// The header's magic bytes did not match the expected value.
    #[error("Delivery blob header has incorrect magic.")]
    BadMagic,

    /// Checksum or other validity checks on the blob contents failed.
    #[error("Integrity/checksum or other validity checks failed.")]
    IntegrityError,
}
148
/// Errors produced while decompressing a delivery blob.
#[derive(Debug, Error)]
pub enum DecompressError {
    /// The delivery blob itself was malformed (bad type, magic, or integrity check).
    #[error("DeliveryBlob error")]
    DeliveryBlob(#[from] DeliveryBlobError),

    /// The compressed payload's chunked archive could not be decoded.
    #[error("ChunkedArchive error")]
    ChunkedArchive(#[from] compression::ChunkedArchiveError),

    /// Not enough input was available to parse or decompress the blob.
    #[error("Need more data")]
    NeedMoreData,

    /// An underlying I/O operation failed.
    #[error("io error")]
    IoError(#[from] std::io::Error),
}
163
#[cfg(target_os = "fuchsia")]
impl From<DeliveryBlobError> for zx::Status {
    /// Map delivery blob errors onto the closest Zircon status codes.
    fn from(value: DeliveryBlobError) -> Self {
        match value {
            // Unknown or unhandled blob format.
            DeliveryBlobError::InvalidType => zx::Status::NOT_SUPPORTED,
            // Corrupted or inconsistent blob contents.
            DeliveryBlobError::BadMagic => zx::Status::IO_DATA_INTEGRITY,
            DeliveryBlobError::IntegrityError => zx::Status::IO_DATA_INTEGRITY,
        }
    }
}
177
/// Parsed header common to all delivery blob types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DeliveryBlobHeader {
    /// Type of this delivery blob; determines the format of the rest of the blob.
    pub delivery_type: DeliveryBlobType,
    /// Length, in bytes, of the serialized type-specific header.
    pub header_length: u32,
}
184
185impl DeliveryBlobHeader {
186 pub fn parse(data: &[u8]) -> Result<Option<DeliveryBlobHeader>, DeliveryBlobError> {
190 let Ok((serialized_header, _metadata_and_payload)) =
191 Ref::<_, format::SerializedHeader>::from_prefix(data)
192 else {
193 return Ok(None);
194 };
195 serialized_header.decode().map(Some)
196 }
197}
198
/// Identifies the format of a delivery blob, as encoded in its header.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[repr(u32)]
pub enum DeliveryBlobType {
    /// Reserved value; not handled by the generate/decompress entry points in this crate.
    Reserved = 0,
    /// Type 1 delivery blob (see [`Type1Blob`]).
    Type1 = 1,
}
211
212impl TryFrom<u32> for DeliveryBlobType {
213 type Error = DeliveryBlobError;
214 fn try_from(value: u32) -> Result<Self, Self::Error> {
215 match value {
216 value if value == DeliveryBlobType::Reserved as u32 => Ok(DeliveryBlobType::Reserved),
217 value if value == DeliveryBlobType::Type1 as u32 => Ok(DeliveryBlobType::Type1),
218 _ => Err(DeliveryBlobError::InvalidType),
219 }
220 }
221}
222
impl From<DeliveryBlobType> for u32 {
    /// Convert back to the raw discriminant; lossless because the enum is `#[repr(u32)]`.
    fn from(value: DeliveryBlobType) -> Self {
        value as u32
    }
}
228
/// Controls whether [`Type1Blob::generate`] compresses the payload.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CompressionMode {
    /// Never compress; always store the payload uncompressed.
    Never,
    /// Compress, but fall back to the uncompressed payload if the compressed form
    /// would be larger than the input.
    Attempt,
    /// Always store the compressed payload, even if it is larger than the input.
    Always,
}
239
/// Parsed metadata of a Type 1 delivery blob.
#[derive(Clone, Copy, Debug)]
pub struct Type1Blob {
    /// Common delivery blob header.
    pub header: DeliveryBlobHeader,
    /// Length, in bytes, of the payload that follows the serialized header.
    pub payload_length: usize,
    /// True if the payload is a compressed chunked archive; false if it is raw data.
    pub is_compressed: bool,
}
253
254impl Type1Blob {
255 pub const HEADER: DeliveryBlobHeader = DeliveryBlobHeader {
256 delivery_type: DeliveryBlobType::Type1,
257 header_length: std::mem::size_of::<SerializedType1Blob>() as u32,
258 };
259
260 const CHUNK_ALIGNMENT: usize = fuchsia_merkle::BLOCK_SIZE;
261
262 pub fn generate(data: &[u8], mode: CompressionMode) -> Vec<u8> {
267 let mut delivery_blob: Vec<u8> = vec![];
268 Self::generate_to(data, mode, &mut delivery_blob).unwrap();
269 delivery_blob
270 }
271
272 pub fn generate_to(
278 data: &[u8],
279 mode: CompressionMode,
280 mut writer: impl std::io::Write,
281 ) -> Result<(), std::io::Error> {
282 let compressed = match mode {
284 CompressionMode::Attempt | CompressionMode::Always => {
285 let compressed = ChunkedArchive::new(data, Self::CHUNK_ALIGNMENT)
286 .expect("failed to compress data");
287 if mode == CompressionMode::Always || compressed.serialized_size() <= data.len() {
288 Some(compressed)
289 } else {
290 None
291 }
292 }
293 CompressionMode::Never => None,
294 };
295
296 let payload_length =
298 compressed.as_ref().map(|archive| archive.serialized_size()).unwrap_or(data.len());
299 let header =
300 Self { header: Type1Blob::HEADER, payload_length, is_compressed: compressed.is_some() };
301 let serialized_header: SerializedType1Blob = header.into();
302 writer.write_all(serialized_header.as_bytes())?;
303
304 if let Some(archive) = compressed {
306 archive.write(writer)?;
307 } else {
308 writer.write_all(data)?;
309 }
310 Ok(())
311 }
312
313 pub fn parse(data: &[u8]) -> Result<Option<(Type1Blob, &[u8])>, DeliveryBlobError> {
318 let Ok((serialized_header, payload)) = Ref::<_, SerializedType1Blob>::from_prefix(data)
319 else {
320 return Ok(None);
321 };
322 serialized_header.decode().map(|metadata| Some((metadata, payload)))
323 }
324
325 pub fn decompressed_size(delivery_blob: &[u8]) -> Result<u64, DecompressError> {
327 let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
328 if !header.is_compressed {
329 return Ok(header.payload_length as u64);
330 }
331
332 let (seek_table, _chunk_data) =
333 compression::decode_archive(payload, header.payload_length)?
334 .ok_or(DecompressError::NeedMoreData)?;
335 Ok(seek_table.into_iter().map(|chunk| chunk.decompressed_range.len() as u64).sum())
336 }
337
338 pub fn decompress(delivery_blob: &[u8]) -> Result<Vec<u8>, DecompressError> {
340 let mut decompressed = vec![];
341 decompressed.reserve(Self::decompressed_size(delivery_blob)? as usize);
342 Self::decompress_to(delivery_blob, &mut decompressed)?;
343 Ok(decompressed)
344 }
345
346 pub fn decompress_to(
348 delivery_blob: &[u8],
349 mut writer: impl std::io::Write,
350 ) -> Result<(), DecompressError> {
351 let (header, payload) = Self::parse(delivery_blob)?.ok_or(DecompressError::NeedMoreData)?;
352 if !header.is_compressed {
353 return Ok(writer.write_all(payload)?);
354 }
355
356 let (seek_table, chunk_data) = compression::decode_archive(payload, header.payload_length)?
357 .ok_or(DecompressError::NeedMoreData)?;
358 let mut decompressor = ChunkedDecompressor::new(seek_table)?;
359 let mut result = Ok(());
360 let mut chunk_callback = |chunk: &[u8]| {
361 if let Err(e) = writer.write_all(chunk) {
362 result = Err(e.into());
363 }
364 };
365 decompressor.update(chunk_data, &mut chunk_callback)?;
366 result
367 }
368}
369
#[cfg(test)]
mod tests {

    use super::*;
    use rand::Rng;

    // Size, in bytes, of the test payloads.
    const DATA_LEN: usize = 500_000;

    #[test]
    fn compression_mode_never() {
        // All-zero data is highly compressible, but `Never` must still store it raw.
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Never);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(!header.is_compressed);
        assert_eq!(header.payload_length, data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }

    #[test]
    fn compression_mode_always() {
        // Uniformly random bytes are effectively incompressible, so the compressed
        // payload should come out larger than the input — `Always` keeps it anyway.
        let data: Vec<u8> = {
            let range = rand::distributions::Uniform::<u8>::new_inclusive(0, 255);
            rand::thread_rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(header.is_compressed);
        assert!(header.payload_length > data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }

    #[test]
    fn compression_mode_attempt_uncompressible() {
        // Incompressible input: `Attempt` should fall back to storing raw data.
        let data: Vec<u8> = {
            let range = rand::distributions::Uniform::<u8>::new_inclusive(0, 255);
            rand::thread_rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(!header.is_compressed);
        assert_eq!(header.payload_length, data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }

    #[test]
    fn compression_mode_attempt_compressible() {
        // Compressible input: `Attempt` should keep the (smaller) compressed payload.
        let data: Vec<u8> = vec![0; DATA_LEN];
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Attempt);
        let (header, _) = Type1Blob::parse(&delivery_blob).unwrap().unwrap();
        assert!(header.is_compressed);
        assert!(header.payload_length < data.len());
        assert_eq!(Type1Blob::decompress(&delivery_blob).unwrap(), data);
    }

    #[test]
    fn get_decompressed_size() {
        let data: Vec<u8> = {
            let range = rand::distributions::Uniform::<u8>::new_inclusive(0, 255);
            rand::thread_rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        // Both the slice-based and reader-based size queries must agree with the input.
        assert_eq!(decompressed_size(&delivery_blob).unwrap(), DATA_LEN as u64);
        assert_eq!(decompressed_size_from_reader(&delivery_blob[..]).unwrap(), DATA_LEN as u64);
    }

    #[test]
    fn test_calculate_digest() {
        let data: Vec<u8> = {
            let range = rand::distributions::Uniform::<u8>::new_inclusive(0, 255);
            rand::thread_rng().sample_iter(&range).take(DATA_LEN).collect()
        };
        let delivery_blob = Type1Blob::generate(&data, CompressionMode::Always);
        // Digest of the delivery blob must match the Merkle root of the original data.
        assert_eq!(
            calculate_digest(&delivery_blob).unwrap(),
            fuchsia_merkle::from_slice(&data).root()
        );
    }
}