1#![allow(clippy::let_unit_value)]
6
7use crate::args::{
8 Args, Command, GcCommand, GetHashCommand, OpenCommand, PkgStatusCommand, RepoAddCommand,
9 RepoAddFileCommand, RepoAddSubCommand, RepoAddUrlCommand, RepoCommand, RepoRemoveCommand,
10 RepoShowCommand, RepoSubCommand, ResolveCommand, RuleClearCommand, RuleCommand,
11 RuleDumpDynamicCommand, RuleListCommand, RuleReplaceCommand, RuleReplaceFileCommand,
12 RuleReplaceJsonCommand, RuleReplaceSubCommand, RuleSubCommand,
13};
14use anyhow::{Context as _, bail, format_err};
15use fetch_url::fetch_url;
16use fidl_fuchsia_pkg_rewrite::EngineMarker;
17use fidl_fuchsia_pkg_rewrite_ext::{Rule as RewriteRule, RuleConfig, do_transaction};
18use fidl_fuchsia_space::ManagerMarker as SpaceManagerMarker;
19use fuchsia_component::client::connect_to_protocol;
20use fuchsia_url::RepositoryUrl;
21use futures::stream::TryStreamExt;
22use std::fs::File;
23use std::io;
24use std::process::exit;
25use {fidl_fuchsia_pkg as fpkg, fidl_fuchsia_pkg_ext as pkg, fuchsia_async as fasync};
26
27mod args;
28
29pub fn main() -> Result<(), anyhow::Error> {
30 let mut executor = fasync::LocalExecutorBuilder::new().build();
31 let Args { command } = argh::from_env();
32 exit(executor.run_singlethreaded(main_helper(command))?)
33}
34
/// Executes a single parsed `Command` against the package-management FIDL
/// services and returns the process exit code to report.
///
/// Exit-code conventions visible in this function:
/// * `0` — success.
/// * `1` — `repo show` did not find the requested repository.
/// * `2` / `3` — `pkg-status` partial results (see that arm below).
/// Any other failure is surfaced as an `Err` for the caller to report.
async fn main_helper(command: Command) -> Result<i32, anyhow::Error> {
    match command {
        // Resolve a package URL via fuchsia.pkg.PackageResolver; with
        // --verbose, also recursively list the resolved package directory.
        Command::Resolve(ResolveCommand { pkg_url, verbose }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            println!("resolving {pkg_url}");

            // The resolver serves the package's directory on this channel.
            let (dir, dir_server_end) = fidl::endpoints::create_proxy();

            // The returned ResolutionContext is intentionally discarded;
            // only success/failure matters here.
            let _: fpkg::ResolutionContext = resolver
                .resolve(&pkg_url, dir_server_end)
                .await?
                .map_err(fidl_fuchsia_pkg_ext::ResolveError::from)
                .with_context(|| format!("Failed to resolve {pkg_url}"))?;

            if verbose {
                println!("package contents:");
                let mut stream =
                    fuchsia_fs::directory::readdir_recursive(&dir, None);
                while let Some(entry) = stream.try_next().await? {
                    println!("/{}", entry.name);
                }
            }

            Ok(0)
        }
        // Print the merkle root (blob id) the resolver reports for a URL.
        Command::GetHash(GetHashCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            let blob_id =
                resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await?.map_err(|i| {
                    // The FIDL error is a raw zx status value.
                    format_err!(
                        "Failed to get package hash with error: {}",
                        zx::Status::from_raw(i)
                    )
                })?;
            println!("{}", pkg::BlobId::from(blob_id));
            Ok(0)
        }
        // Report whether a package is known to the TUF repo and cached on
        // disk. Exit codes: 0 = in repo and on disk, 2 = in repo but not on
        // disk, 3 = not in the repo (disk state not checked).
        Command::PkgStatus(PkgStatusCommand { pkg_url }) => {
            let resolver = connect_to_protocol::<fpkg::PackageResolverMarker>()
                .context("Failed to connect to resolver service")?;
            let blob_id = match resolver.get_hash(&fpkg::PackageUrl { url: pkg_url }).await? {
                Ok(blob_id) => pkg::BlobId::from(blob_id),
                Err(status) => match zx::Status::from_raw(status) {
                    zx::Status::NOT_FOUND => {
                        println!("Package in registered TUF repo: no");
                        println!("Package on disk: unknown (did not check since not in tuf repo)");
                        return Ok(3);
                    }
                    other_failure_status => {
                        bail!(
                            "Cannot determine pkg status. Failed fuchsia.pkg.PackageResolver.GetHash with unexpected status: {:?}",
                            other_failure_status
                        );
                    }
                },
            };
            println!("Package in registered TUF repo: yes (merkle={blob_id})");

            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );

            // Only checks the cache; does not trigger a fetch.
            match cache.get_already_cached(blob_id).await {
                Ok(_) => {}
                Err(e) if e.was_not_cached() => {
                    println!("Package on disk: no");
                    return Ok(2);
                }
                Err(e) => {
                    bail!(
                        "Cannot determine pkg status. Failed fuchsia.pkg.PackageCache.Get: {:?}",
                        e
                    );
                }
            }
            println!("Package on disk: yes");
            Ok(0)
        }
        // Open an already-cached package by its meta.far blob id and list
        // its full (recursive) directory contents.
        Command::Open(OpenCommand { meta_far_blob_id }) => {
            let cache = pkg::cache::Client::from_proxy(
                connect_to_protocol::<fpkg::PackageCacheMarker>()
                    .context("Failed to connect to cache service")?,
            );
            println!("opening {meta_far_blob_id}");

            let dir = cache.get_already_cached(meta_far_blob_id).await?.into_proxy();
            let entries = fuchsia_fs::directory::readdir_recursive(&dir, None)
                .try_collect::<Vec<_>>()
                .await?;
            println!("package contents:");
            for entry in entries {
                println!("/{}", entry.name);
            }

            Ok(0)
        }
        // Repository management: list (default), add (from file or URL),
        // remove, and show, all via fuchsia.pkg.RepositoryManager.
        Command::Repo(RepoCommand { verbose, subcommand }) => {
            let repo_manager = connect_to_protocol::<fpkg::RepositoryManagerMarker>()
                .context("Failed to connect to resolver service")?;

            match subcommand {
                // No subcommand: list registered repos. Terse mode prints
                // sorted URLs; verbose mode dumps full configs as JSON.
                None => {
                    if !verbose {
                        let repos = fetch_repos(repo_manager).await?;

                        let mut urls =
                            repos.into_iter().map(|r| r.repo_url().to_string()).collect::<Vec<_>>();
                        urls.sort_unstable();
                        urls.into_iter().for_each(|url| println!("{url}"));
                    } else {
                        let repos = fetch_repos(repo_manager).await?;

                        let s = serde_json::to_string_pretty(&repos).expect("valid json");
                        println!("{s}");
                    }
                    Ok(0)
                }
                Some(RepoSubCommand::Add(RepoAddCommand { subcommand })) => {
                    match subcommand {
                        // Load a RepositoryConfig from a local JSON file,
                        // optionally overriding its URL host (--name) and
                        // storage type (--persist), then register it.
                        RepoAddSubCommand::File(RepoAddFileCommand { persist, name, file }) => {
                            let mut repo: pkg::RepositoryConfig =
                                serde_json::from_reader(io::BufReader::new(File::open(file)?))?;
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                        // Same as above, but the config JSON is fetched from
                        // a URL instead of read from a file.
                        RepoAddSubCommand::Url(RepoAddUrlCommand { persist, name, repo_url }) => {
                            let res = fetch_url(repo_url, None).await?;
                            let mut repo: pkg::RepositoryConfig = serde_json::from_slice(&res)?;
                            if let Some(n) = name {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_url(RepositoryUrl::parse_host(n)?)
                                    .build();
                            }
                            if persist {
                                repo = pkg::RepositoryConfigBuilder::from(repo)
                                    .repo_storage_type(pkg::RepositoryStorageType::Persistent)
                                    .build();
                            }

                            let res = repo_manager.add(&repo.into()).await?;
                            let () = res.map_err(zx::Status::from_raw)?;
                        }
                    }

                    Ok(0)
                }

                Some(RepoSubCommand::Remove(RepoRemoveCommand { repo_url })) => {
                    let res = repo_manager.remove(&repo_url).await?;
                    let () = res.map_err(zx::Status::from_raw)?;

                    Ok(0)
                }

                // Print the JSON config of one repo, matched by exact URL
                // string; exit 1 if no registered repo matches.
                Some(RepoSubCommand::Show(RepoShowCommand { repo_url })) => {
                    let repos = fetch_repos(repo_manager).await?;
                    for repo in repos.into_iter() {
                        if repo.repo_url().to_string() == repo_url {
                            let s = serde_json::to_string_pretty(&repo).expect("valid json");
                            println!("{s}");
                            return Ok(0);
                        }
                    }

                    println!("Package repository not found: {repo_url:?}");
                    Ok(1)
                }
            }
        }
        // Rewrite-rule management via fuchsia.pkg.rewrite.Engine.
        Command::Rule(RuleCommand { subcommand }) => {
            let engine = connect_to_protocol::<EngineMarker>()
                .context("Failed to connect to rewrite engine service")?;

            match subcommand {
                // List all rules: drain the rule iterator (an empty page
                // signals the end), then pretty-print each rule.
                RuleSubCommand::List(RuleListCommand {}) => {
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    engine.list(iter_server_end)?;

                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;

                    for rule in rules {
                        println!("{rule:#?}");
                    }
                }
                // Clear all dynamic rules inside a single edit transaction.
                RuleSubCommand::Clear(RuleClearCommand {}) => {
                    do_transaction(&engine, |transaction| async move {
                        transaction.reset_all()?;
                        Ok(transaction)
                    })
                    .await?;
                }
                // Dump the current dynamic rules as a versioned JSON config.
                // An edit transaction is opened only to read the rule list.
                RuleSubCommand::DumpDynamic(RuleDumpDynamicCommand {}) => {
                    let (transaction, transaction_server_end) = fidl::endpoints::create_proxy();
                    let () = engine.start_edit_transaction(transaction_server_end)?;
                    let (iter, iter_server_end) = fidl::endpoints::create_proxy();
                    transaction.list_dynamic(iter_server_end)?;
                    let mut rules = Vec::new();
                    loop {
                        let more = iter.next().await?;
                        if more.is_empty() {
                            break;
                        }
                        rules.extend(more);
                    }
                    let rules = rules.into_iter().map(|rule| rule.try_into()).collect::<Result<
                        Vec<RewriteRule>,
                        _,
                    >>(
                    )?;
                    let rule_configs = RuleConfig::Version1(rules);
                    let dynamic_rules = serde_json::to_string_pretty(&rule_configs)?;
                    println!("{dynamic_rules}");
                }
                // Replace the whole dynamic rule set from a JSON config
                // (from a file or inline). Rules are added in reverse so the
                // first rule in the config ends up highest priority —
                // presumably because `add` prepends; TODO confirm against
                // the rewrite engine's add semantics.
                RuleSubCommand::Replace(RuleReplaceCommand { subcommand }) => {
                    let RuleConfig::Version1(ref rules) = match subcommand {
                        RuleReplaceSubCommand::File(RuleReplaceFileCommand { file }) => {
                            serde_json::from_reader(io::BufReader::new(File::open(file)?))?
                        }
                        RuleReplaceSubCommand::Json(RuleReplaceJsonCommand { config }) => config,
                    };

                    do_transaction(&engine, |transaction| {
                        async move {
                            transaction.reset_all()?;
                            for rule in rules.iter().rev() {
                                let () = transaction.add(rule.clone()).await?;
                            }
                            Ok(transaction)
                        }
                    })
                    .await?;
                }
            }

            Ok(0)
        }
        // Trigger garbage collection via fuchsia.space.Manager.
        Command::Gc(GcCommand {}) => {
            let space_manager = connect_to_protocol::<SpaceManagerMarker>()
                .context("Failed to connect to space manager service")?;
            space_manager
                .gc()
                .await?
                .map_err(|err| format_err!("Garbage collection failed with error: {:?}", err))
                .map(|_| 0i32)
        }
    }
}
319
320async fn fetch_repos(
321 repo_manager: fpkg::RepositoryManagerProxy,
322) -> Result<Vec<pkg::RepositoryConfig>, anyhow::Error> {
323 let (iter, server_end) = fidl::endpoints::create_proxy();
324 repo_manager.list(server_end)?;
325 let mut repos = vec![];
326
327 loop {
328 let chunk = iter.next().await?;
329 if chunk.is_empty() {
330 break;
331 }
332 repos.extend(chunk);
333 }
334
335 repos
336 .into_iter()
337 .map(|repo| pkg::RepositoryConfig::try_from(repo).map_err(anyhow::Error::from))
338 .collect()
339}