1#![allow(clippy::let_unit_value)]
6
7use crate::args::{
8 Args, Command, GcCommand, GetHashCommand, OpenCommand, PkgStatusCommand, RepoAddCommand,
9 RepoAddFileCommand, RepoAddSubCommand, RepoAddUrlCommand, RepoCommand, RepoRemoveCommand,
10 RepoShowCommand, RepoSubCommand, ResolveCommand, RuleClearCommand, RuleCommand,
11 RuleDumpDynamicCommand, RuleListCommand, RuleReplaceCommand, RuleReplaceFileCommand,
12 RuleReplaceJsonCommand, RuleReplaceSubCommand, RuleSubCommand,
13};
14use anyhow::{bail, format_err, Context as _};
15use fidl_fuchsia_net_http::{self as http};
16use fidl_fuchsia_pkg_rewrite::EngineMarker;
17use fidl_fuchsia_pkg_rewrite_ext::{do_transaction, Rule as RewriteRule, RuleConfig};
18use fidl_fuchsia_space::ManagerMarker as SpaceManagerMarker;
19use fuchsia_component::client::connect_to_protocol;
20use fuchsia_url::RepositoryUrl;
21use futures::io::copy;
22use futures::stream::TryStreamExt;
23use std::fs::File;
24use std::io;
25use std::process::exit;
26use {fidl_fuchsia_pkg as fpkg, fidl_fuchsia_pkg_ext as pkg, fuchsia_async as fasync};
27
28mod args;
29mod error;
30
31pub fn main() -> Result<(), anyhow::Error> {
32 let mut executor = fasync::LocalExecutor::new();
33 let Args { command } = argh::from_env();
34 exit(executor.run_singlethreaded(main_helper(command))?)
35}
36
/// Executes one parsed `pkgctl` command against the platform package
/// services and returns the process exit code for `main` to pass to `exit`.
///
/// Exit codes returned via `Ok(..)` (errors are propagated via `Result`):
/// * 0 — success
/// * 1 — `repo show`: repository not found
/// * 2 — `pkg-status`: package known to the TUF repo but not on disk
/// * 3 — `pkg-status`: package not in any registered TUF repo
async fn main_helper(command: Command) -> Result<i32, anyhow::Error> {
    match command {
        Command::Resolve(ResolveCommand { pkg_url, verbose }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            println!("resolving {pkg_url}");

            let (dir, dir_server_end) = fidl::endpoints::create_proxy();

            // The resolution context is unused here; the typed ignore pattern
            // documents (and compile-checks) exactly what is being discarded.
            let _: fpkg::ResolutionContext = resolver
                .resolve(&pkg_url, dir_server_end)
                .await?
                .map_err(fidl_fuchsia_pkg_ext::ResolveError::from)
                .with_context(|| format!("Failed to resolve {pkg_url}"))?;

            if verbose {
                // Walk the resolved package directory and print every entry.
                println!("package contents:");
                let mut stream =
                    fuchsia_fs::directory::readdir_recursive(&dir, None);
                while let Some(entry) = stream.try_next().await? {
                    println!("/{}", entry.name);
                }
            }

            Ok(0)
        }
        Command::GetHash(GetHashCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            // GetHash reports failure as a raw zx status code (i32).
            let blob_id =
                resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await?.map_err(|i| {
                    format_err!(
                        "Failed to get package hash with error: {}",
                        zx::Status::from_raw(i)
                    )
                })?;
            println!("{}", pkg::BlobId::from(blob_id));
            Ok(0)
        }
        Command::PkgStatus(PkgStatusCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            // Step 1: is the package known to a registered TUF repository?
            let blob_id = match resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await? {
                Ok(blob_id) => pkg::BlobId::from(blob_id),
                Err(status) => match zx::Status::from_raw(status) {
                    zx::Status::NOT_FOUND => {
                        println!("Package in registered TUF repo: no");
                        println!("Package on disk: unknown (did not check since not in tuf repo)");
                        return Ok(3);
                    }
                    other_failure_status => {
                        bail!("Cannot determine pkg status. Failed fuchsia.pkg.PackageResolver.GetHash with unexpected status: {:?}",
                            other_failure_status
                        );
                    }
                },
            };
            println!("Package in registered TUF repo: yes (merkle={blob_id})");

            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );

            // Step 2: is the package content already present in the cache?
            match cache.get_already_cached(blob_id).await {
                Ok(_) => {}
                Err(e) if e.was_not_cached() => {
                    println!("Package on disk: no");
                    return Ok(2);
                }
                Err(e) => {
                    bail!(
                        "Cannot determine pkg status. Failed fuchsia.pkg.PackageCache.Get: {:?}",
                        e
                    );
                }
            }
            println!("Package on disk: yes");
            Ok(0)
        }
        Command::Open(OpenCommand { meta_far_blob_id }) => {
            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );
            println!("opening {meta_far_blob_id}");

            // Open the already-cached package by its meta.far blob id and
            // list its full contents.
            let dir = cache.get_already_cached(meta_far_blob_id).await?.into_proxy();
            let entries = fuchsia_fs::directory::readdir_recursive(&dir, None)
                .try_collect::<Vec<_>>()
                .await?;
            println!("package contents:");
            for entry in entries {
                println!("/{}", entry.name);
            }

            Ok(0)
        }
        Command::Repo(RepoCommand { verbose, subcommand }) => {
            let repo_manager = connect_to_protocol::<fpkg::RepositoryManagerMarker>()
                .context("Failed to connect to resolver service")?;

            match subcommand {
                // Bare `repo`: list the configured repositories.
                None => {
                    if !verbose {
                        // Terse: sorted repository URLs, one per line.
                        let repos = fetch_repos(repo_manager).await?;

                        let mut urls =
                            repos.into_iter().map(|r| r.repo_url().to_string()).collect::<Vec<_>>();
                        urls.sort_unstable();
                        urls.into_iter().for_each(|url| println!("{url}"));
                    } else {
                        // Verbose: full configs as pretty-printed JSON.
                        let repos = fetch_repos(repo_manager).await?;

                        let s = serde_json::to_string_pretty(&repos).expect("valid json");
                        println!("{s}");
                    }
                    Ok(0)
                }
                Some(RepoSubCommand::Add(RepoAddCommand { subcommand })) => {
                    match subcommand {
                        // Add a repository config read from a local JSON file.
                        RepoAddSubCommand::File(RepoAddFileCommand { persist, name, file }) => {
                            let mut repo: pkg::RepositoryConfig =
                                serde_json::from_reader(io::BufReader::new(File::open(file)?))?;
                            // Optionally override the repo URL with --name.
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            // Optionally switch the storage type to Persistent.
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                        // Add a repository config fetched over HTTP from a URL.
                        RepoAddSubCommand::Url(RepoAddUrlCommand { persist, name, repo_url }) => {
                            let res = fetch_url(repo_url).await?;
                            let mut repo: pkg::RepositoryConfig = serde_json::from_slice(&res)?;
                            // Optionally override the repo URL with --name.
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            // Optionally switch the storage type to Persistent.
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                    }

                    Ok(0)
                }

                Some(RepoSubCommand::Remove(RepoRemoveCommand { repo_url })) => {
                    let res = repo_manager.remove(&repo_url).await?;
                    let () = res.map_err(zx::Status::from_raw)?;

                    Ok(0)
                }

                Some(RepoSubCommand::Show(RepoShowCommand { repo_url })) => {
                    // Linear scan over all configs for a matching URL.
                    let repos = fetch_repos(repo_manager).await?;
                    for repo in repos.into_iter() {
                        if repo.repo_url().to_string() == repo_url {
                            let s = serde_json::to_string_pretty(&repo).expect("valid json");
                            println!("{s}");
                            return Ok(0);
                        }
                    }

                    println!("Package repository not found: {repo_url:?}");
                    Ok(1)
                }
            }
        }
        Command::Rule(RuleCommand { subcommand }) => {
            let engine = connect_to_protocol::<EngineMarker>()
                .context("Failed to connect to rewrite engine service")?;

            match subcommand {
                RuleSubCommand::List(RuleListCommand {}) => {
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    engine.list(iter_server_end)?;

                    // Drain the paginated iterator; an empty page ends it.
                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    // Convert each FIDL rule into the ext type, failing on
                    // the first invalid rule.
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;

                    for rule in rules {
                        println!("{rule:#?}");
                    }
                }
                RuleSubCommand::Clear(RuleClearCommand {}) => {
                    // Reset all dynamic rules inside an edit transaction.
                    do_transaction(&engine, |transaction| async move {
                        transaction.reset_all()?;
                        Ok(transaction)
                    })
                    .await?;
                }
                RuleSubCommand::DumpDynamic(RuleDumpDynamicCommand {}) => {
                    let (transaction, transaction_server_end) = fidl::endpoints::create_proxy();
                    let () = engine.start_edit_transaction(transaction_server_end)?;
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    transaction.list_dynamic(iter_server_end)?;
                    // Drain the paginated iterator; an empty page ends it.
                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;
                    // Print the dynamic rules in the versioned config format.
                    let rule_configs = RuleConfig::Version1(rules);
                    let dynamic_rules = serde_json::to_string_pretty(&rule_configs)?;
                    println!("{dynamic_rules}");
                }
                RuleSubCommand::Replace(RuleReplaceCommand { subcommand }) => {
                    // Load the replacement rule set from a file or inline JSON.
                    let RuleConfig::Version1(ref rules) = match subcommand {
                        RuleReplaceSubCommand::File(RuleReplaceFileCommand { file }) => {
                            serde_json::from_reader(io::BufReader::new(File::open(file)?))?
                        }
                        RuleReplaceSubCommand::Json(RuleReplaceJsonCommand { config }) => config,
                    };

                    do_transaction(&engine, |transaction| {
                        async move {
                            transaction.reset_all()?;
                            // NOTE(review): rules are added in reverse —
                            // presumably `add` prepends so the final order
                            // matches the config; confirm transaction
                            // ordering semantics.
                            for rule in rules.iter().rev() {
                                let () = transaction.add(rule.clone()).await?;
                            }
                            Ok(transaction)
                        }
                    })
                    .await?;
                }
            }

            Ok(0)
        }
        Command::Gc(GcCommand {}) => {
            // Ask the space manager to run garbage collection; success maps
            // to exit code 0.
            let space_manager = connect_to_protocol::<SpaceManagerMarker>()
                .context("Failed to connect to space manager service")?;
            space_manager
                .gc()
                .await?
                .map_err(|err| format_err!("Garbage collection failed with error: {:?}", err))
                .map(|_| 0i32)
        }
    }
}
320
321async fn fetch_repos(
322 repo_manager: fpkg::RepositoryManagerProxy,
323) -> Result<Vec<pkg::RepositoryConfig>, anyhow::Error> {
324 let (iter, server_end) = fidl::endpoints::create_proxy();
325 repo_manager.list(server_end)?;
326 let mut repos = vec![];
327
328 loop {
329 let chunk = iter.next().await?;
330 if chunk.is_empty() {
331 break;
332 }
333 repos.extend(chunk);
334 }
335
336 repos
337 .into_iter()
338 .map(|repo| pkg::RepositoryConfig::try_from(repo).map_err(anyhow::Error::from))
339 .collect()
340}
341
342async fn fetch_url<T: Into<String>>(url_string: T) -> Result<Vec<u8>, anyhow::Error> {
343 let http_svc = connect_to_protocol::<http::LoaderMarker>()
344 .context("Unable to connect to fuchsia.net.http.Loader")?;
345
346 let url_request = http::Request {
347 url: Some(url_string.into()),
348 method: Some(String::from("GET")),
349 headers: None,
350 body: None,
351 deadline: None,
352 ..Default::default()
353 };
354
355 let response =
356 http_svc.fetch(url_request).await.context("Error while calling Loader::Fetch")?;
357
358 if let Some(e) = response.error {
359 return Err(format_err!("LoaderProxy error - {:?}", e));
360 }
361
362 let socket = match response.body {
363 Some(s) => fasync::Socket::from_socket(s),
364 _ => {
365 return Err(format_err!("failed to read UrlBody from the stream"));
366 }
367 };
368
369 let mut body = Vec::new();
370 let bytes_received =
371 copy(socket, &mut body).await.context("Failed to read bytes from the socket")?;
372
373 if bytes_received < 1 {
374 return Err(format_err!(
375 "Failed to download data from url! bytes_received = {}",
376 bytes_received
377 ));
378 }
379
380 Ok(body)
381}