]> git.proxmox.com Git - proxmox-offline-mirror.git/blob - src/mirror.rs
bump version to 0.6.6
[proxmox-offline-mirror.git] / src / mirror.rs
1 use std::{
2 cmp::max,
3 collections::HashMap,
4 io::Read,
5 path::{Path, PathBuf},
6 };
7
8 use anyhow::{bail, format_err, Error};
9 use flate2::bufread::GzDecoder;
10 use globset::{Glob, GlobSet, GlobSetBuilder};
11 use nix::libc;
12 use proxmox_http::{client::sync::Client, HttpClient, HttpOptions, ProxyConfig};
13 use proxmox_schema::{ApiType, Schema};
14 use proxmox_sys::fs::file_get_contents;
15
16 use crate::{
17 config::{MirrorConfig, SkipConfig, SubscriptionKey, WeakCryptoConfig},
18 convert_repo_line,
19 pool::Pool,
20 types::{Diff, Snapshot, SNAPSHOT_REGEX},
21 FetchResult, Progress,
22 };
23 use proxmox_apt::{
24 deb822::{
25 CheckSums, CompressionType, FileReference, FileReferenceType, PackagesFile, ReleaseFile,
26 SourcesFile,
27 },
28 repositories::{APTRepository, APTRepositoryPackageType},
29 };
30
31 use crate::helpers;
32
33 fn mirror_dir(config: &MirrorConfig) -> PathBuf {
34 PathBuf::from(&config.base_dir).join(&config.id)
35 }
36
37 pub(crate) fn pool(config: &MirrorConfig) -> Result<Pool, Error> {
38 let pool_dir = PathBuf::from(&config.base_dir).join(".pool");
39 Pool::open(&mirror_dir(config), &pool_dir)
40 }
41
/// `MirrorConfig`, but some fields converted/parsed into usable types.
struct ParsedMirrorConfig {
    // parsed APT repository line (single repo definition)
    pub repository: APTRepository,
    // architectures to mirror, as given in the config
    pub architectures: Vec<String>,
    // pool backing this mirror (shared storage + per-snapshot hardlinks)
    pub pool: Pool,
    // raw contents of the repository signing key file
    pub key: Vec<u8>,
    // whether to (re-)verify checksums when reading already-pooled files
    pub verify: bool,
    // flag forwarded to `Pool::add_file` (presumably controls fsync - confirm in pool.rs)
    pub sync: bool,
    // optional HTTP `Authorization` header value; filled in by the caller
    // for subscription-based repositories (see `create_snapshot`)
    pub auth: Option<String>,
    // HTTP client with proxy + user-agent already configured
    pub client: Client,
    // continue (collecting warnings) instead of aborting on package fetch errors
    pub ignore_errors: bool,
    // user-configured skip lists (sections / package globs)
    pub skip: SkipConfig,
    // which weak crypto algorithms are acceptable during signature verification
    pub weak_crypto: WeakCryptoConfig,
}
56
57 impl TryInto<ParsedMirrorConfig> for MirrorConfig {
58 type Error = anyhow::Error;
59
60 fn try_into(self) -> Result<ParsedMirrorConfig, Self::Error> {
61 let pool = pool(&self)?;
62
63 let repository = convert_repo_line(self.repository.clone())?;
64
65 let key = file_get_contents(Path::new(&self.key_path))?;
66
67 let options = HttpOptions {
68 user_agent: Some(
69 concat!("proxmox-offline-mirror/", env!("CARGO_PKG_VERSION")).to_string(),
70 ),
71 proxy_config: ProxyConfig::from_proxy_env()?,
72 ..Default::default()
73 }; // TODO actually read version ;)
74
75 let client = Client::new(options);
76
77 let weak_crypto = match self.weak_crypto {
78 Some(property_string) => {
79 let value = (WeakCryptoConfig::API_SCHEMA as Schema)
80 .parse_property_string(&property_string)?;
81 serde_json::from_value(value)?
82 }
83 None => WeakCryptoConfig::default(),
84 };
85
86 Ok(ParsedMirrorConfig {
87 repository,
88 architectures: self.architectures,
89 pool,
90 key,
91 verify: self.verify,
92 sync: self.sync,
93 auth: None,
94 client,
95 ignore_errors: self.ignore_errors,
96 skip: self.skip,
97 weak_crypto,
98 })
99 }
100 }
101
102 // Helper to get absolute URL for dist-specific relative `path`.
103 fn get_dist_url(repo: &APTRepository, path: &str) -> String {
104 let dist_root = format!("{}/dists/{}", repo.uris[0], repo.suites[0]);
105
106 format!("{}/{}", dist_root, path)
107 }
108
109 // Helper to get dist-specific path given a `prefix` (snapshot dir) and relative `path`.
110 fn get_dist_path(repo: &APTRepository, prefix: &Path, path: &str) -> PathBuf {
111 let mut base = PathBuf::from(prefix);
112 base.push("dists");
113 base.push(&repo.suites[0]);
114 base.push(path);
115 base
116 }
117
118 // Helper to get generic URL given a `repo` and `path`.
119 fn get_repo_url(repo: &APTRepository, path: &str) -> String {
120 format!("{}/{}", repo.uris[0], path)
121 }
122
123 /// Helper to fetch file from URI and optionally verify the responses checksum.
124 ///
125 /// Only fetches and returns data, doesn't store anything anywhere.
126 fn fetch_repo_file(
127 client: &Client,
128 uri: &str,
129 max_size: usize,
130 checksums: Option<&CheckSums>,
131 auth: Option<&str>,
132 ) -> Result<FetchResult, Error> {
133 println!("-> GET '{}'..", uri);
134
135 let headers = if let Some(auth) = auth {
136 let mut map = HashMap::new();
137 map.insert("Authorization".to_string(), auth.to_string());
138 Some(map)
139 } else {
140 None
141 };
142
143 let response = client.get(uri, headers.as_ref())?;
144
145 let reader: Box<dyn Read> = response.into_body();
146 let mut reader = reader.take(max_size as u64);
147 let mut data = Vec::new();
148 reader.read_to_end(&mut data)?;
149
150 if let Some(checksums) = checksums {
151 checksums.verify(&data)?;
152 }
153
154 Ok(FetchResult {
155 fetched: data.len(),
156 data,
157 })
158 }
159
/// Helper to fetch InRelease or Release/Release.gpg files from repository.
///
/// Set `detached` == false to fetch InRelease or to `detached` == true for Release/Release.gpg.
/// Verifies the contained/detached signature and stores all fetched files under `prefix`.
///
/// Returns the verified raw release file data, or None if the "fetch" part itself fails.
fn fetch_release(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    detached: bool,
    dry_run: bool,
) -> Result<Option<FetchResult>, Error> {
    let (name, fetched, sig) = if detached {
        println!("Fetching Release/Release.gpg files");
        // fetch the detached signature first - a missing file means the repo
        // doesn't provide this variant, which is reported as Ok(None), not Err
        let sig = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release.gpg"),
            1024 * 1024,
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("Release.gpg fetch failure: {err}");
                return Ok(None);
            }
        };

        let mut fetched = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("Release fetch failure: {err}");
                return Ok(None);
            }
        };
        // account for the signature's bytes in the combined fetch counter
        fetched.fetched += sig.fetched;
        ("Release(.gpg)", fetched, Some(sig.data()))
    } else {
        println!("Fetching InRelease file");
        let fetched = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "InRelease"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("InRelease fetch failure: {err}");
                return Ok(None);
            }
        };
        ("InRelease", fetched, None)
    };

    println!("Verifying '{name}' signature using provided repository key..");
    let content = fetched.data_ref();
    // `sig` is None for InRelease (inline signature), Some(Release.gpg bytes)
    // for the detached variant
    let verified =
        helpers::verify_signature(content, &config.key, sig.as_deref(), &config.weak_crypto)?;
    println!("Success");

    // only a SHA512 checksum is computed here; it serves as the pool lookup key
    let sha512 = Some(openssl::sha::sha512(content));
    let csums = CheckSums {
        sha512,
        ..Default::default()
    };

    if dry_run {
        // don't touch the pool on dry runs, just report what was fetched
        return Ok(Some(FetchResult {
            data: verified,
            fetched: fetched.fetched,
        }));
    }

    let locked = &config.pool.lock()?;

    // deduplicate: only add to the pool if not already present
    if !locked.contains(&csums) {
        locked.add_file(content, &csums, config.sync)?;
    }

    if detached {
        // link both the Release file and its detached signature into the snapshot
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release")),
        )?;
        let sig = sig.unwrap();
        let sha512 = Some(openssl::sha::sha512(&sig));
        let csums = CheckSums {
            sha512,
            ..Default::default()
        };
        if !locked.contains(&csums) {
            locked.add_file(&sig, &csums, config.sync)?;
        }
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release.gpg")),
        )?;
    } else {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "InRelease")),
        )?;
    }

    Ok(Some(FetchResult {
        data: verified,
        fetched: fetched.fetched,
    }))
}
276
/// Helper to fetch an index file referenced by a `ReleaseFile`.
///
/// Since these usually come in compressed and uncompressed form, with the latter often not
/// actually existing in the source repository as file, this fetches and if necessary decompresses
/// to obtain a copy of the uncompressed data.
/// Will skip fetching if both references are already available with the expected checksum in the
/// pool, in which case they will just be re-linked under the new path.
///
/// Returns the uncompressed data.
fn fetch_index_file(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    reference: &FileReference,
    uncompressed: Option<&FileReference>,
    by_hash: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let url = get_dist_url(&config.repository, &reference.path);
    let path = get_dist_path(&config.repository, prefix, &reference.path);

    if let Some(uncompressed) = uncompressed {
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);

        // fast path: both variants already pooled - no network access needed,
        // just re-link them under the new snapshot prefix
        if config.pool.contains(&reference.checksums)
            && config.pool.contains(&uncompressed.checksums)
        {
            let data = config
                .pool
                .get_contents(&uncompressed.checksums, config.verify)?;

            if dry_run {
                return Ok(FetchResult { data, fetched: 0 });
            }
            // Ensure they're linked at current path
            config.pool.lock()?.link_file(&reference.checksums, &path)?;
            config
                .pool
                .lock()?
                .link_file(&uncompressed.checksums, &uncompressed_path)?;
            return Ok(FetchResult { data, fetched: 0 });
        }
    }

    // with acquire-by-hash, prefer the checksum-addressed URLs (strongest
    // hash first), falling back to the plain path as a last resort
    let urls = if by_hash {
        let mut urls = Vec::new();
        if let Some((base_url, _file_name)) = url.rsplit_once('/') {
            if let Some(sha512) = reference.checksums.sha512 {
                urls.push(format!("{base_url}/by-hash/SHA512/{}", hex::encode(sha512)));
            }
            if let Some(sha256) = reference.checksums.sha256 {
                urls.push(format!("{base_url}/by-hash/SHA256/{}", hex::encode(sha256)));
            }
        }
        urls.push(url);
        urls
    } else {
        vec![url]
    };

    // try candidate URLs in order; the fold keeps the first success and
    // otherwise retries with the next URL, so only the last error survives
    let res = urls
        .iter()
        .fold(None, |res, url| match res {
            Some(Ok(res)) => Some(Ok(res)),
            _ => Some(fetch_plain_file(
                config,
                url,
                &path,
                reference.size,
                &reference.checksums,
                true,
                dry_run,
            )),
        })
        .ok_or_else(|| format_err!("Failed to retrieve {}", reference.path))??;

    let mut buf = Vec::new();
    let raw = res.data_ref();

    // decompress (if necessary) to obtain the plain index data
    let decompressed = match reference.file_type.compression() {
        None => raw,
        Some(CompressionType::Gzip) => {
            let mut gz = GzDecoder::new(raw);
            gz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Bzip2) => {
            let mut bz = bzip2::read::BzDecoder::new(raw);
            bz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Lzma) | Some(CompressionType::Xz) => {
            let mut xz = xz2::read::XzDecoder::new_multi_decoder(raw);
            xz.read_to_end(&mut buf)?;
            &buf[..]
        }
    };
    let res = FetchResult {
        data: decompressed.to_owned(),
        fetched: res.fetched,
    };

    if dry_run {
        return Ok(res);
    }

    let locked = &config.pool.lock()?;
    if let Some(uncompressed) = uncompressed {
        // store the uncompressed variant too - it may not exist upstream as a file
        if !locked.contains(&uncompressed.checksums) {
            locked.add_file(decompressed, &uncompressed.checksums, config.sync)?;
        }

        // Ensure it's linked at current path
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);
        locked.link_file(&uncompressed.checksums, &uncompressed_path)?;
    }

    Ok(res)
}
395
396 /// Helper to fetch arbitrary files like binary packages.
397 ///
398 /// Will skip fetching if matching file already exists locally, in which case it will just be
399 /// re-linked under the new path.
400 ///
401 /// If need_data is false and the mirror config is set to skip verification, reading the file's
402 /// content will be skipped as well if fetching was skipped.
403 fn fetch_plain_file(
404 config: &ParsedMirrorConfig,
405 url: &str,
406 file: &Path,
407 max_size: usize,
408 checksums: &CheckSums,
409 need_data: bool,
410 dry_run: bool,
411 ) -> Result<FetchResult, Error> {
412 let locked = &config.pool.lock()?;
413 let res = if locked.contains(checksums) {
414 if need_data || config.verify {
415 locked
416 .get_contents(checksums, config.verify)
417 .map(|data| FetchResult { data, fetched: 0 })?
418 } else {
419 // performance optimization for .deb files if verify is false
420 // we never need the file contents and they make up the bulk of a repo
421 FetchResult {
422 data: vec![],
423 fetched: 0,
424 }
425 }
426 } else if dry_run && !need_data {
427 FetchResult {
428 data: vec![],
429 fetched: 0,
430 }
431 } else {
432 let fetched = fetch_repo_file(
433 &config.client,
434 url,
435 max_size,
436 Some(checksums),
437 config.auth.as_deref(),
438 )?;
439 locked.add_file(fetched.data_ref(), checksums, config.verify)?;
440 fetched
441 };
442
443 if !dry_run {
444 // Ensure it's linked at current path
445 locked.link_file(checksums, file)?;
446 }
447
448 Ok(res)
449 }
450
451 /// Initialize a new mirror (by creating the corresponding pool).
452 pub fn init(config: &MirrorConfig) -> Result<(), Error> {
453 let pool_dir = PathBuf::from(&config.base_dir).join(".pool");
454
455 let dir = mirror_dir(config);
456
457 Pool::create(&dir, &pool_dir)?;
458 Ok(())
459 }
460
461 /// Destroy a mirror (by destroying the corresponding pool's link dir followed by GC).
462 pub fn destroy(config: &MirrorConfig) -> Result<(), Error> {
463 let pool: Pool = pool(config)?;
464 pool.lock()?.destroy()?;
465
466 Ok(())
467 }
468
469 /// List snapshots
470 pub fn list_snapshots(config: &MirrorConfig) -> Result<Vec<Snapshot>, Error> {
471 let _pool: Pool = pool(config)?;
472
473 let mut list: Vec<Snapshot> = vec![];
474
475 let path = mirror_dir(config);
476
477 proxmox_sys::fs::scandir(
478 libc::AT_FDCWD,
479 &path,
480 &SNAPSHOT_REGEX,
481 |_l2_fd, snapshot, file_type| {
482 if file_type != nix::dir::Type::Directory {
483 return Ok(());
484 }
485
486 list.push(snapshot.parse()?);
487
488 Ok(())
489 },
490 )?;
491
492 list.sort_unstable();
493
494 Ok(list)
495 }
496
/// Aggregated statistics collected over a whole snapshot run.
struct MirrorProgress {
    // non-fatal failure messages, printed in bulk at the end of the run
    warnings: Vec<String>,
    // stats for packages that *would* be fetched (dry-run mode only)
    dry_run: Progress,
    // stats for everything actually fetched/linked
    total: Progress,
    // number of packages skipped due to the configured skip lists
    skip_count: usize,
    // accumulated size (in bytes) of the skipped packages
    skip_bytes: usize,
}
504
505 fn convert_to_globset(config: &ParsedMirrorConfig) -> Result<Option<GlobSet>, Error> {
506 Ok(if let Some(skipped_packages) = &config.skip.skip_packages {
507 let mut globs = GlobSetBuilder::new();
508 for glob in skipped_packages {
509 let glob = Glob::new(glob)?;
510 globs.add(glob);
511 }
512 let globs = globs.build()?;
513 Some(globs)
514 } else {
515 None
516 })
517 }
518
/// Fetches all binary packages referenced by the given `Packages` indices of one component.
///
/// Honors the configured section / package-glob skip lists and updates `progress` with
/// fetch stats, skip counters, and warnings (when `ignore_errors` is set).
fn fetch_binary_packages(
    config: &ParsedMirrorConfig,
    component: &str,
    packages_indices: HashMap<&String, PackagesFile>,
    dry_run: bool,
    prefix: &Path,
    progress: &mut MirrorProgress,
) -> Result<(), Error> {
    let skipped_package_globs = convert_to_globset(config)?;

    for (basename, references) in packages_indices {
        let total_files = references.files.len();
        if total_files == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_files} total file(s)");
        }

        // per-index counters, merged into the overall `progress` at the end
        let mut fetch_progress = Progress::new();
        let mut skip_count = 0usize;
        let mut skip_bytes = 0usize;

        for package in references.files {
            // skip packages from a skipped section (plain or component-prefixed form)
            if let Some(ref sections) = &config.skip.skip_sections {
                if sections.iter().any(|section| {
                    package.section == *section
                        || package.section == format!("{component}/{section}")
                }) {
                    println!(
                        "\tskipping {} - {}b (section '{}')",
                        package.package, package.size, package.section
                    );
                    skip_count += 1;
                    skip_bytes += package.size;
                    continue;
                }
            }
            // skip packages matching one of the configured glob patterns
            if let Some(skipped_package_globs) = &skipped_package_globs {
                let matches = skipped_package_globs.matches(&package.package);
                if !matches.is_empty() {
                    // safety, skipped_package_globs is set based on this
                    let globs = config.skip.skip_packages.as_ref().unwrap();
                    let matches: Vec<String> = matches.iter().map(|i| globs[*i].clone()).collect();
                    println!(
                        "\tskipping {} - {}b (package glob(s): {})",
                        package.package,
                        package.size,
                        matches.join(", ")
                    );
                    skip_count += 1;
                    skip_bytes += package.size;
                    continue;
                }
            }
            let url = get_repo_url(&config.repository, &package.file);

            if dry_run {
                // only report; count files missing from the pool as "to be fetched"
                if config.pool.contains(&package.checksums) {
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: 0,
                    });
                } else {
                    println!("\t(dry-run) GET missing '{url}' ({}b)", package.size);
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: package.size,
                    });
                }
            } else {
                let mut full_path = PathBuf::from(prefix);
                full_path.push(&package.file);

                match fetch_plain_file(
                    config,
                    &url,
                    &full_path,
                    package.size,
                    &package.checksums,
                    false,
                    dry_run,
                ) {
                    Ok(res) => fetch_progress.update(&res),
                    // with ignore_errors, fetch failures are downgraded to warnings
                    Err(err) if config.ignore_errors => {
                        let msg = format!(
                            "{}: failed to fetch package '{}' - {}",
                            basename, package.file, err,
                        );
                        eprintln!("{msg}");
                        progress.warnings.push(msg);
                    }
                    Err(err) => return Err(err),
                }
            }

            // print a progress line roughly every percent of processed files
            if fetch_progress.file_count() % (max(total_files / 100, 1)) == 0 {
                println!("\tProgress: {fetch_progress}");
            }
        }
        println!("\tProgress: {fetch_progress}");
        if dry_run {
            progress.dry_run += fetch_progress;
        } else {
            progress.total += fetch_progress;
        }
        if skip_count > 0 {
            progress.skip_count += skip_count;
            progress.skip_bytes += skip_bytes;
            println!("Skipped downloading {skip_count} packages totalling {skip_bytes}b");
        }
    }

    Ok(())
}
634
/// Fetches all source packages referenced by the given `Sources` indices of one component.
///
/// Honors the configured section / package-glob skip lists and updates `progress` with
/// fetch stats, skip counters, and warnings (when `ignore_errors` is set).
fn fetch_source_packages(
    config: &ParsedMirrorConfig,
    component: &str,
    source_packages_indices: HashMap<&String, SourcesFile>,
    dry_run: bool,
    prefix: &Path,
    progress: &mut MirrorProgress,
) -> Result<(), Error> {
    let skipped_package_globs = convert_to_globset(config)?;

    for (basename, references) in source_packages_indices {
        let total_source_packages = references.source_packages.len();
        if total_source_packages == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_source_packages} total source package(s)");
        }

        // per-index counters, merged into the overall `progress` at the end
        let mut fetch_progress = Progress::new();
        let mut skip_count = 0usize;
        let mut skip_bytes = 0usize;
        for package in references.source_packages {
            // skip packages from a skipped section (plain or component-prefixed form);
            // note: for source packages the section is optional
            if let Some(ref sections) = &config.skip.skip_sections {
                if sections.iter().any(|section| {
                    package.section.as_ref() == Some(section)
                        || package.section == Some(format!("{component}/{section}"))
                }) {
                    println!(
                        "\tskipping {} - {}b (section '{}')",
                        package.package,
                        package.size(),
                        package.section.as_ref().unwrap(),
                    );
                    skip_count += 1;
                    skip_bytes += package.size();
                    continue;
                }
            }
            // skip packages matching one of the configured glob patterns
            if let Some(skipped_package_globs) = &skipped_package_globs {
                let matches = skipped_package_globs.matches(&package.package);
                if !matches.is_empty() {
                    // safety, skipped_package_globs is set based on this
                    let globs = config.skip.skip_packages.as_ref().unwrap();
                    let matches: Vec<String> = matches.iter().map(|i| globs[*i].clone()).collect();
                    println!(
                        "\tskipping {} - {}b (package glob(s): {})",
                        package.package,
                        package.size(),
                        matches.join(", ")
                    );
                    skip_count += 1;
                    skip_bytes += package.size();
                    continue;
                }
            }

            // a source package consists of multiple file references (dsc, orig, diff, ..)
            for file_reference in package.files.values() {
                let path = format!("{}/{}", package.directory, file_reference.file);
                let url = get_repo_url(&config.repository, &path);

                if dry_run {
                    // only report; count files missing from the pool as "to be fetched"
                    if config.pool.contains(&file_reference.checksums) {
                        fetch_progress.update(&FetchResult {
                            data: vec![],
                            fetched: 0,
                        });
                    } else {
                        println!("\t(dry-run) GET missing '{url}' ({}b)", file_reference.size);
                        fetch_progress.update(&FetchResult {
                            data: vec![],
                            fetched: file_reference.size,
                        });
                    }
                } else {
                    let mut full_path = PathBuf::from(prefix);
                    full_path.push(&path);

                    match fetch_plain_file(
                        config,
                        &url,
                        &full_path,
                        file_reference.size,
                        &file_reference.checksums,
                        false,
                        dry_run,
                    ) {
                        Ok(res) => fetch_progress.update(&res),
                        // with ignore_errors, fetch failures are downgraded to warnings
                        Err(err) if config.ignore_errors => {
                            let msg = format!(
                                "{}: failed to fetch package '{}' - {}",
                                basename, file_reference.file, err,
                            );
                            eprintln!("{msg}");
                            progress.warnings.push(msg);
                        }
                        Err(err) => return Err(err),
                    }
                }

                // print a progress line roughly every percent of processed packages
                if fetch_progress.file_count() % (max(total_source_packages / 100, 1)) == 0 {
                    println!("\tProgress: {fetch_progress}");
                }
            }
        }
        println!("\tProgress: {fetch_progress}");
        if dry_run {
            progress.dry_run += fetch_progress;
        } else {
            progress.total += fetch_progress;
        }
        if skip_count > 0 {
            progress.skip_count += skip_count;
            progress.skip_bytes += skip_bytes;
            println!("Skipped downloading {skip_count} packages totalling {skip_bytes}b");
        }
    }

    Ok(())
}
755
/// Create a new snapshot of the remote repository, fetching and storing files as needed.
///
/// Operates in three phases:
/// - Fetch and verify release files
/// - Fetch referenced indices according to config
/// - Fetch binary packages referenced by package indices
///
/// Files will be linked in a temporary directory and only renamed to the final, valid snapshot
/// directory at the end. In case of error, leftover `XXX.tmp` directories at the top level of
/// `base_dir` can be safely removed once the next snapshot was successfully created, as they only
/// contain hardlinks.
pub fn create_snapshot(
    config: MirrorConfig,
    snapshot: &Snapshot,
    subscription: Option<SubscriptionKey>,
    dry_run: bool,
) -> Result<(), Error> {
    // derive the HTTP basic-auth header from the subscription key, if the
    // repository requires one, validating that key and repo product match
    let auth = if let Some(product) = &config.use_subscription {
        match subscription {
            None => {
                bail!(
                    "Mirror {} requires a subscription key, but none given.",
                    config.id
                );
            }
            Some(key) if key.product() == *product => {
                let base64 = base64::encode(format!("{}:{}", key.key, key.server_id));
                Some(format!("basic {base64}"))
            }
            Some(key) => {
                bail!(
                    "Repository product type '{}' and key product type '{}' don't match.",
                    product,
                    key.product()
                );
            }
        }
    } else {
        None
    };

    let mut config: ParsedMirrorConfig = config.try_into()?;
    config.auth = auth;

    // everything is linked below a temporary dir first, see fn docs above
    let prefix = format!("{snapshot}.tmp");
    let prefix = Path::new(&prefix);

    let mut progress = MirrorProgress {
        warnings: Vec::new(),
        skip_count: 0,
        skip_bytes: 0,
        dry_run: Progress::new(),
        total: Progress::new(),
    };

    let parse_release = |res: FetchResult, name: &str| -> Result<ReleaseFile, Error> {
        println!("Parsing {name}..");
        let parsed: ReleaseFile = res.data[..].try_into()?;
        println!(
            "'{name}' file has {} referenced files..",
            parsed.files.len()
        );
        Ok(parsed)
    };

    // we want both on-disk for compat reasons, if both are available
    let release = fetch_release(&config, prefix, true, dry_run)?
        .map(|res| {
            progress.total.update(&res);
            parse_release(res, "Release")
        })
        .transpose()?;

    let in_release = fetch_release(&config, prefix, false, dry_run)?
        .map(|res| {
            progress.total.update(&res);
            parse_release(res, "InRelease")
        })
        .transpose()?;

    // at least one must be available to proceed
    let release = release
        .or(in_release)
        .ok_or_else(|| format_err!("Neither Release(.gpg) nor InRelease available!"))?;

    let mut per_component = HashMap::new();
    let mut others = Vec::new();
    let binary = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::Deb);
    let source = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::DebSrc);

    // decide which referenced index files are relevant, grouped by component;
    // everything else ends up in `others` (reported as skipped)
    for (basename, references) in &release.files {
        let reference = references.first();
        let reference = if let Some(reference) = reference {
            reference.clone()
        } else {
            continue;
        };
        let skip_components = !&config.repository.components.contains(&reference.component);

        let skip = skip_components
            || match &reference.file_type {
                FileReferenceType::Ignored => true,
                FileReferenceType::PDiff => true, // would require fetching the patches as well
                FileReferenceType::Sources(_) => !source,
                _ => {
                    if let Some(arch) = reference.file_type.architecture() {
                        !binary || !config.architectures.contains(arch)
                    } else {
                        false
                    }
                }
            };
        if skip {
            println!("Skipping {}", reference.path);
            others.push(reference);
        } else {
            let list = per_component
                .entry(reference.component)
                .or_insert_with(Vec::new);
            list.push(basename);
        }
    }
    println!();

    // print a summary of what will be fetched, per component and in total
    let mut indices_size = 0_usize;
    let mut total_count = 0;

    for (component, references) in &per_component {
        println!("Component '{component}'");

        let mut component_indices_size = 0;

        for basename in references {
            for reference in release.files.get(*basename).unwrap() {
                println!("\t{:?}: {:?}", reference.path, reference.file_type);
                component_indices_size += reference.size;
            }
        }
        indices_size += component_indices_size;

        let component_count = references.len();
        total_count += component_count;

        println!("Component references count: {component_count}");
        println!("Component indices size: {component_indices_size}");
        if references.is_empty() {
            println!("\tNo references found..");
        }
    }
    println!("Total indices count: {total_count}");
    println!("Total indices size: {indices_size}");

    if !others.is_empty() {
        println!("Skipped {} references", others.len());
    }
    println!();

    // phase 2: fetch the selected indices and parse the package indices
    let mut packages_size = 0_usize;
    #[allow(clippy::type_complexity)]
    let mut per_component_indices: HashMap<
        String,
        (
            HashMap<&String, PackagesFile>,
            HashMap<&String, SourcesFile>,
        ),
    > = HashMap::new();

    let mut failed_references = Vec::new();
    for (component, references) in per_component {
        println!("\nFetching indices for component '{component}'");
        let mut component_deb_size = 0;
        let mut component_dsc_size = 0;

        let mut fetch_progress = Progress::new();

        let (packages_indices, source_packages_indices) =
            per_component_indices.entry(component.clone()).or_default();

        for basename in references {
            println!("\tFetching '{basename}'..");
            let files = release.files.get(basename).unwrap();
            let uncompressed_ref = files.iter().find(|reference| reference.path == *basename);

            let mut package_index_data = None;

            for reference in files {
                // if both compressed and uncompressed are referenced, the uncompressed file may
                // not exist on the server
                if Some(reference) == uncompressed_ref && files.len() > 1 {
                    continue;
                }

                // this will ensure the uncompressed file will be written locally
                // (`aquire_by_hash` [sic] is the field name as defined upstream)
                let res = match fetch_index_file(
                    &config,
                    prefix,
                    reference,
                    uncompressed_ref,
                    release.aquire_by_hash,
                    dry_run,
                ) {
                    Ok(res) => res,
                    // failures on non-package-index references (e.g. translations)
                    // are not fatal - warn, remember, and continue
                    Err(err) if !reference.file_type.is_package_index() => {
                        let msg = format!(
                            "Failed to fetch '{:?}' type reference '{}', skipping - {err}",
                            reference.file_type, reference.path
                        );
                        eprintln!("{msg}");
                        progress.warnings.push(msg);
                        failed_references.push(reference);
                        continue;
                    }
                    Err(err) => return Err(err),
                };
                fetch_progress.update(&res);

                // keep the first successfully fetched package index for parsing below
                if package_index_data.is_none() && reference.file_type.is_package_index() {
                    package_index_data = Some((&reference.file_type, res.data()));
                }
            }
            // parse the package index of this basename to learn which packages to fetch
            if let Some((reference_type, data)) = package_index_data {
                match reference_type {
                    FileReferenceType::Packages(_, _) => {
                        let packages: PackagesFile = data[..].try_into()?;
                        let size: usize = packages.files.iter().map(|p| p.size).sum();
                        println!("\t{} packages totalling {size}", packages.files.len());
                        component_deb_size += size;

                        packages_indices.entry(basename).or_insert(packages);
                    }
                    FileReferenceType::Sources(_) => {
                        let source_packages: SourcesFile = data[..].try_into()?;
                        let size: usize = source_packages
                            .source_packages
                            .iter()
                            .map(|s| s.size())
                            .sum();
                        println!(
                            "\t{} source packages totalling {size}",
                            source_packages.source_packages.len()
                        );
                        component_dsc_size += size;
                        source_packages_indices
                            .entry(basename)
                            .or_insert(source_packages);
                    }
                    unknown => {
                        eprintln!("Unknown package index '{unknown:?}', skipping processing..")
                    }
                }
            }
            println!("Progress: {fetch_progress}");
        }

        println!("Total deb size for component: {component_deb_size}");
        packages_size += component_deb_size;

        println!("Total dsc size for component: {component_dsc_size}");
        packages_size += component_dsc_size;

        progress.total += fetch_progress;
    }
    println!("Total deb size: {packages_size}");
    if !failed_references.is_empty() {
        eprintln!("Failed to download non-package-index references:");
        for reference in failed_references {
            eprintln!("\t{}", reference.path);
        }
    }

    // phase 3: fetch the actual binary and source packages
    for (component, (packages_indices, source_packages_indices)) in per_component_indices {
        println!("\nFetching {component} packages..");
        fetch_binary_packages(
            &config,
            &component,
            packages_indices,
            dry_run,
            prefix,
            &mut progress,
        )?;

        fetch_source_packages(
            &config,
            &component,
            source_packages_indices,
            dry_run,
            prefix,
            &mut progress,
        )?;
    }

    if dry_run {
        println!(
            "\nDry-run Stats (indices, downloaded but not persisted):\n{}",
            progress.total
        );
        println!(
            "\nDry-run stats (packages, new == missing):\n{}",
            progress.dry_run
        );
    } else {
        println!("\nStats: {}", progress.total);
    }
    if total_count > 0 {
        println!(
            "Skipped downloading {} packages totalling {}b",
            progress.skip_count, progress.skip_bytes,
        );
    }

    if !progress.warnings.is_empty() {
        eprintln!("Warnings:");
        for msg in progress.warnings {
            eprintln!("- {msg}");
        }
    }

    if !dry_run {
        // "publish" the snapshot by renaming the temp dir to its final name
        println!("\nRotating temp. snapshot in-place: {prefix:?} -> \"{snapshot}\"");
        let locked = config.pool.lock()?;
        locked.rename(prefix, Path::new(&format!("{snapshot}")))?;
    }

    Ok(())
}
1087
1088 /// Remove a snapshot by removing the corresponding snapshot directory. To actually free up space,
1089 /// a garbage collection needs to be run afterwards.
1090 pub fn remove_snapshot(config: &MirrorConfig, snapshot: &Snapshot) -> Result<(), Error> {
1091 let pool: Pool = pool(config)?;
1092 let path = pool.get_path(Path::new(&snapshot.to_string()))?;
1093
1094 pool.lock()?.remove_dir(&path)
1095 }
1096
1097 /// Run a garbage collection on the underlying pool.
1098 pub fn gc(config: &MirrorConfig) -> Result<(usize, u64), Error> {
1099 let pool: Pool = pool(config)?;
1100
1101 pool.lock()?.gc()
1102 }
1103
1104 /// Print differences between two snapshots
1105 pub fn diff_snapshots(
1106 config: &MirrorConfig,
1107 snapshot: &Snapshot,
1108 other_snapshot: &Snapshot,
1109 ) -> Result<Diff, Error> {
1110 let pool = pool(config)?;
1111 pool.lock()?.diff_dirs(
1112 Path::new(&format!("{snapshot}")),
1113 Path::new(&format!("{other_snapshot}")),
1114 )
1115 }